virtio-scsi: suppress virtqueue kick during processing
[qemu.git] / target / arm / helper.c
1 #include "qemu/osdep.h"
2 #include "trace.h"
3 #include "cpu.h"
4 #include "internals.h"
5 #include "exec/gdbstub.h"
6 #include "exec/helper-proto.h"
7 #include "qemu/host-utils.h"
8 #include "sysemu/arch_init.h"
9 #include "sysemu/sysemu.h"
10 #include "qemu/bitops.h"
11 #include "qemu/crc32c.h"
12 #include "exec/exec-all.h"
13 #include "exec/cpu_ldst.h"
14 #include "arm_ldst.h"
15 #include <zlib.h> /* For crc32 */
16 #include "exec/semihost.h"
17 #include "sysemu/kvm.h"
18
19 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
20
21 #ifndef CONFIG_USER_ONLY
22 static bool get_phys_addr(CPUARMState *env, target_ulong address,
23 int access_type, ARMMMUIdx mmu_idx,
24 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
25 target_ulong *page_size, uint32_t *fsr,
26 ARMMMUFaultInfo *fi);
27
28 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
29 int access_type, ARMMMUIdx mmu_idx,
30 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
31 target_ulong *page_size_ptr, uint32_t *fsr,
32 ARMMMUFaultInfo *fi);
33
34 /* Definitions for the PMCCNTR and PMCR registers */
35 #define PMCRD 0x8
36 #define PMCRC 0x4
37 #define PMCRE 0x1
38 #endif
39
/* gdbstub read accessor for the AArch32 VFP register view.
 * 'reg' indexes D0..D15 (D0..D31 with VFP3), then Q-register aliases
 * when NEON is present, then the FPSID/FPSCR/FPEXC control registers.
 * Returns the number of bytes written to 'buf', or 0 for out-of-range.
 */
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            /* Each Q register aliases a pair of consecutive D registers.
             * NOTE(review): the 'reg - 32' offset assumes NEON implies the
             * 32-D-register (VFP3) layout above -- TODO confirm.
             */
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    /* Control/status registers follow the data registers in gdb's view. */
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
66
/* gdbstub write accessor for the AArch32 VFP register view; mirror of
 * vfp_gdb_get_reg. Returns the number of bytes consumed from 'buf',
 * or 0 if 'reg' is out of range.
 */
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Q-register aliases: one Q reg spans two D regs. */
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    /* Only FPEXC.EN (bit 30) is kept on writes here. */
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
91
92 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
93 {
94 switch (reg) {
95 case 0 ... 31:
96 /* 128 bit FP register */
97 stfq_le_p(buf, env->vfp.regs[reg * 2]);
98 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
99 return 16;
100 case 32:
101 /* FPSR */
102 stl_p(buf, vfp_get_fpsr(env));
103 return 4;
104 case 33:
105 /* FPCR */
106 stl_p(buf, vfp_get_fpcr(env));
107 return 4;
108 default:
109 return 0;
110 }
111 }
112
113 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
114 {
115 switch (reg) {
116 case 0 ... 31:
117 /* 128 bit FP register */
118 env->vfp.regs[reg * 2] = ldfq_le_p(buf);
119 env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
120 return 16;
121 case 32:
122 /* FPSR */
123 vfp_set_fpsr(env, ldl_p(buf));
124 return 4;
125 case 33:
126 /* FPCR */
127 vfp_set_fpcr(env, ldl_p(buf));
128 return 4;
129 default:
130 return 0;
131 }
132 }
133
134 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
135 {
136 assert(ri->fieldoffset);
137 if (cpreg_field_is_64bit(ri)) {
138 return CPREG_FIELD64(env, ri);
139 } else {
140 return CPREG_FIELD32(env, ri);
141 }
142 }
143
144 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
145 uint64_t value)
146 {
147 assert(ri->fieldoffset);
148 if (cpreg_field_is_64bit(ri)) {
149 CPREG_FIELD64(env, ri) = value;
150 } else {
151 CPREG_FIELD32(env, ri) = value;
152 }
153 }
154
155 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
156 {
157 return (char *)env + ri->fieldoffset;
158 }
159
160 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
161 {
162 /* Raw read of a coprocessor register (as needed for migration, etc). */
163 if (ri->type & ARM_CP_CONST) {
164 return ri->resetvalue;
165 } else if (ri->raw_readfn) {
166 return ri->raw_readfn(env, ri);
167 } else if (ri->readfn) {
168 return ri->readfn(env, ri);
169 } else {
170 return raw_read(env, ri);
171 }
172 }
173
174 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
175 uint64_t v)
176 {
177 /* Raw write of a coprocessor register (as needed for migration, etc).
178 * Note that constant registers are treated as write-ignored; the
179 * caller should check for success by whether a readback gives the
180 * value written.
181 */
182 if (ri->type & ARM_CP_CONST) {
183 return;
184 } else if (ri->raw_writefn) {
185 ri->raw_writefn(env, ri, v);
186 } else if (ri->writefn) {
187 ri->writefn(env, ri, v);
188 } else {
189 raw_write(env, ri, v);
190 }
191 }
192
/* Return true if the regdef would cause an assertion if you called
 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
 * program bug for it not to have the NO_RAW flag).
 * NB that returning false here doesn't necessarily mean that calling
 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
 * read/write access functions which are safe for raw use" from "has
 * read/write access functions which have side effects but has forgotten
 * to provide raw access functions".
 * The tests here line up with the conditions in read/write_raw_cp_reg()
 * and assertions in raw_read()/raw_write().
 */
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Safe (returns false) when: constant, or backed by a field, or
     * accessor functions exist for both directions.
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
213
214 bool write_cpustate_to_list(ARMCPU *cpu)
215 {
216 /* Write the coprocessor state from cpu->env to the (index,value) list. */
217 int i;
218 bool ok = true;
219
220 for (i = 0; i < cpu->cpreg_array_len; i++) {
221 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
222 const ARMCPRegInfo *ri;
223
224 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
225 if (!ri) {
226 ok = false;
227 continue;
228 }
229 if (ri->type & ARM_CP_NO_RAW) {
230 continue;
231 }
232 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
233 }
234 return ok;
235 }
236
237 bool write_list_to_cpustate(ARMCPU *cpu)
238 {
239 int i;
240 bool ok = true;
241
242 for (i = 0; i < cpu->cpreg_array_len; i++) {
243 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
244 uint64_t v = cpu->cpreg_values[i];
245 const ARMCPRegInfo *ri;
246
247 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
248 if (!ri) {
249 ok = false;
250 continue;
251 }
252 if (ri->type & ARM_CP_NO_RAW) {
253 continue;
254 }
255 /* Write value and confirm it reads back as written
256 * (to catch read-only registers and partially read-only
257 * registers where the incoming migration value doesn't match)
258 */
259 write_raw_cp_reg(&cpu->env, ri, v);
260 if (read_raw_cp_reg(&cpu->env, ri) != v) {
261 ok = false;
262 }
263 }
264 return ok;
265 }
266
267 static void add_cpreg_to_list(gpointer key, gpointer opaque)
268 {
269 ARMCPU *cpu = opaque;
270 uint64_t regidx;
271 const ARMCPRegInfo *ri;
272
273 regidx = *(uint32_t *)key;
274 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
275
276 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
277 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
278 /* The value array need not be initialized at this point */
279 cpu->cpreg_array_len++;
280 }
281 }
282
283 static void count_cpreg(gpointer key, gpointer opaque)
284 {
285 ARMCPU *cpu = opaque;
286 uint64_t regidx;
287 const ARMCPRegInfo *ri;
288
289 regidx = *(uint32_t *)key;
290 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
291
292 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
293 cpu->cpreg_array_len++;
294 }
295 }
296
297 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
298 {
299 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
300 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
301
302 if (aidx > bidx) {
303 return 1;
304 }
305 if (aidx < bidx) {
306 return -1;
307 }
308 return 0;
309 }
310
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     *
     * Two passes over the sorted key list: first count the eligible
     * registers (count_cpreg), then size the arrays and fill the index
     * array (add_cpreg_to_list). cpreg_array_len is deliberately reset
     * to 0 between the passes because both callbacks use it as a cursor.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    /* Pass 1: count. */
    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    /* Pass 2: fill; must select exactly the same registers as pass 1. */
    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
340
341 /*
342 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
343 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
344 *
345 * access_el3_aa32ns: Used to check AArch32 register views.
346 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
347 */
348 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
349 const ARMCPRegInfo *ri,
350 bool isread)
351 {
352 bool secure = arm_is_secure_below_el3(env);
353
354 assert(!arm_el_is_aa64(env, 3));
355 if (secure) {
356 return CP_ACCESS_TRAP_UNCATEGORIZED;
357 }
358 return CP_ACCESS_OK;
359 }
360
361 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
362 const ARMCPRegInfo *ri,
363 bool isread)
364 {
365 if (!arm_el_is_aa64(env, 3)) {
366 return access_el3_aa32ns(env, ri, isread);
367 }
368 return CP_ACCESS_OK;
369 }
370
371 /* Some secure-only AArch32 registers trap to EL3 if used from
372 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
373 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
374 * We assume that the .access field is set to PL1_RW.
375 */
376 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
377 const ARMCPRegInfo *ri,
378 bool isread)
379 {
380 if (arm_current_el(env) == 3) {
381 return CP_ACCESS_OK;
382 }
383 if (arm_is_secure_below_el3(env)) {
384 return CP_ACCESS_TRAP_EL3;
385 }
386 /* This will be EL1 NS and EL2 NS, which just UNDEF */
387 return CP_ACCESS_TRAP_UNCATEGORIZED;
388 }
389
390 /* Check for traps to "powerdown debug" registers, which are controlled
391 * by MDCR.TDOSA
392 */
393 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
394 bool isread)
395 {
396 int el = arm_current_el(env);
397
398 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
399 && !arm_is_secure_below_el3(env)) {
400 return CP_ACCESS_TRAP_EL2;
401 }
402 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
403 return CP_ACCESS_TRAP_EL3;
404 }
405 return CP_ACCESS_OK;
406 }
407
408 /* Check for traps to "debug ROM" registers, which are controlled
409 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
410 */
411 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
412 bool isread)
413 {
414 int el = arm_current_el(env);
415
416 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
417 && !arm_is_secure_below_el3(env)) {
418 return CP_ACCESS_TRAP_EL2;
419 }
420 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
421 return CP_ACCESS_TRAP_EL3;
422 }
423 return CP_ACCESS_OK;
424 }
425
426 /* Check for traps to general debug registers, which are controlled
427 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
428 */
429 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
430 bool isread)
431 {
432 int el = arm_current_el(env);
433
434 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
435 && !arm_is_secure_below_el3(env)) {
436 return CP_ACCESS_TRAP_EL2;
437 }
438 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
439 return CP_ACCESS_TRAP_EL3;
440 }
441 return CP_ACCESS_OK;
442 }
443
444 /* Check for traps to performance monitor registers, which are controlled
445 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
446 */
447 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
448 bool isread)
449 {
450 int el = arm_current_el(env);
451
452 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
453 && !arm_is_secure_below_el3(env)) {
454 return CP_ACCESS_TRAP_EL2;
455 }
456 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
457 return CP_ACCESS_TRAP_EL3;
458 }
459 return CP_ACCESS_OK;
460 }
461
462 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
463 {
464 ARMCPU *cpu = arm_env_get_cpu(env);
465
466 raw_write(env, ri, value);
467 tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
468 }
469
470 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
471 {
472 ARMCPU *cpu = arm_env_get_cpu(env);
473
474 if (raw_read(env, ri) != value) {
475 /* Unlike real hardware the qemu TLB uses virtual addresses,
476 * not modified virtual addresses, so this causes a TLB flush.
477 */
478 tlb_flush(CPU(cpu), 1);
479 raw_write(env, ri, value);
480 }
481 }
482
483 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
484 uint64_t value)
485 {
486 ARMCPU *cpu = arm_env_get_cpu(env);
487
488 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
489 && !extended_addresses_enabled(env)) {
490 /* For VMSA (when not using the LPAE long descriptor page table
491 * format) this register includes the ASID, so do a TLB flush.
492 * For PMSA it is purely a process ID and no action is needed.
493 */
494 tlb_flush(CPU(cpu), 1);
495 }
496 raw_write(env, ri, value);
497 }
498
499 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
500 uint64_t value)
501 {
502 /* Invalidate all (TLBIALL) */
503 ARMCPU *cpu = arm_env_get_cpu(env);
504
505 tlb_flush(CPU(cpu), 1);
506 }
507
508 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
509 uint64_t value)
510 {
511 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
512 ARMCPU *cpu = arm_env_get_cpu(env);
513
514 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
515 }
516
517 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
518 uint64_t value)
519 {
520 /* Invalidate by ASID (TLBIASID) */
521 ARMCPU *cpu = arm_env_get_cpu(env);
522
523 tlb_flush(CPU(cpu), value == 0);
524 }
525
526 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
527 uint64_t value)
528 {
529 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
530 ARMCPU *cpu = arm_env_get_cpu(env);
531
532 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
533 }
534
535 /* IS variants of TLB operations must affect all cores */
536 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
537 uint64_t value)
538 {
539 CPUState *other_cs;
540
541 CPU_FOREACH(other_cs) {
542 tlb_flush(other_cs, 1);
543 }
544 }
545
546 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
547 uint64_t value)
548 {
549 CPUState *other_cs;
550
551 CPU_FOREACH(other_cs) {
552 tlb_flush(other_cs, value == 0);
553 }
554 }
555
556 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
557 uint64_t value)
558 {
559 CPUState *other_cs;
560
561 CPU_FOREACH(other_cs) {
562 tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
563 }
564 }
565
566 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
567 uint64_t value)
568 {
569 CPUState *other_cs;
570
571 CPU_FOREACH(other_cs) {
572 tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
573 }
574 }
575
576 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
577 uint64_t value)
578 {
579 CPUState *cs = ENV_GET_CPU(env);
580
581 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
582 ARMMMUIdx_S2NS, -1);
583 }
584
585 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
586 uint64_t value)
587 {
588 CPUState *other_cs;
589
590 CPU_FOREACH(other_cs) {
591 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
592 ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
593 }
594 }
595
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    /* The register holds IPA[39:12] in its low bits; rebuild the
     * (sign-extended) 40-bit intermediate physical address.
     */
    pageaddr = sextract64(value << 12, 0, 40);

    /* Only the stage-2 MMU index needs flushing for this page. */
    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
}
616
617 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
618 uint64_t value)
619 {
620 CPUState *other_cs;
621 uint64_t pageaddr;
622
623 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
624 return;
625 }
626
627 pageaddr = sextract64(value << 12, 0, 40);
628
629 CPU_FOREACH(other_cs) {
630 tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
631 }
632 }
633
634 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
635 uint64_t value)
636 {
637 CPUState *cs = ENV_GET_CPU(env);
638
639 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
640 }
641
642 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
643 uint64_t value)
644 {
645 CPUState *other_cs;
646
647 CPU_FOREACH(other_cs) {
648 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
649 }
650 }
651
652 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
653 uint64_t value)
654 {
655 CPUState *cs = ENV_GET_CPU(env);
656 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
657
658 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
659 }
660
661 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
662 uint64_t value)
663 {
664 CPUState *other_cs;
665 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
666
667 CPU_FOREACH(other_cs) {
668 tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
669 }
670 }
671
/* Registers common to all CPU variants: FCSEIDR and CONTEXTIDR, with
 * separate secure/non-secure instances (see comments below).
 */
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
706
/* Registers present only on pre-v8 CPUs (wildcarded definitions that v8
 * replaces with precise per-register entries).
 */
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
735
/* Registers present only on pre-v6 CPUs. */
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
744
/* Registers present only on pre-v7 CPUs (v6 WFI, cache lockdown, the
 * broadly-wildcarded pre-v7 TLB maintenance ops, etc).
 */
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
793
794 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
795 uint64_t value)
796 {
797 uint32_t mask = 0;
798
799 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
800 if (!arm_feature(env, ARM_FEATURE_V8)) {
801 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
802 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
803 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
804 */
805 if (arm_feature(env, ARM_FEATURE_VFP)) {
806 /* VFP coprocessor: cp10 & cp11 [23:20] */
807 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
808
809 if (!arm_feature(env, ARM_FEATURE_NEON)) {
810 /* ASEDIS [31] bit is RAO/WI */
811 value |= (1 << 31);
812 }
813
814 /* VFPv3 and upwards with NEON implement 32 double precision
815 * registers (D0-D31).
816 */
817 if (!arm_feature(env, ARM_FEATURE_NEON) ||
818 !arm_feature(env, ARM_FEATURE_VFP3)) {
819 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
820 value |= (1 << 30);
821 }
822 }
823 value &= mask;
824 }
825 env->cp15.cpacr_el1 = value;
826 }
827
828 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
829 bool isread)
830 {
831 if (arm_feature(env, ARM_FEATURE_V8)) {
832 /* Check if CPACR accesses are to be trapped to EL2 */
833 if (arm_current_el(env) == 1 &&
834 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
835 return CP_ACCESS_TRAP_EL2;
836 /* Check if CPACR accesses are to be trapped to EL3 */
837 } else if (arm_current_el(env) < 3 &&
838 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
839 return CP_ACCESS_TRAP_EL3;
840 }
841 }
842
843 return CP_ACCESS_OK;
844 }
845
846 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
847 bool isread)
848 {
849 /* Check if CPTR accesses are set to trap to EL3 */
850 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
851 return CP_ACCESS_TRAP_EL3;
852 }
853
854 return CP_ACCESS_OK;
855 }
856
/* Registers introduced in ARMv6 (barriers, IFAR, WFAR, CPACR, ...). */
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
888
889 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
890 bool isread)
891 {
892 /* Performance monitor registers user accessibility is controlled
893 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
894 * trapping to EL2 or EL3 for other accesses.
895 */
896 int el = arm_current_el(env);
897
898 if (el == 0 && !env->cp15.c9_pmuserenr) {
899 return CP_ACCESS_TRAP;
900 }
901 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
902 && !arm_is_secure_below_el3(env)) {
903 return CP_ACCESS_TRAP_EL2;
904 }
905 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
906 return CP_ACCESS_TRAP_EL3;
907 }
908
909 return CP_ACCESS_OK;
910 }
911
912 #ifndef CONFIG_USER_ONLY
913
914 static inline bool arm_ccnt_enabled(CPUARMState *env)
915 {
916 /* This does not support checking PMCCFILTR_EL0 register */
917
918 if (!(env->cp15.c9_pmcr & PMCRE)) {
919 return false;
920 }
921
922 return true;
923 }
924
/* Toggle c15_ccnt between "running" and "stopped" representations.
 * While the counter is enabled, c15_ccnt holds (tick_base - count); when
 * it is disabled it holds the raw count. The swap 'ticks - c15_ccnt' is
 * self-inverse, so this function is called in pairs around any change
 * to the counter's configuration (see pmcr_write / pmccfiltr_write).
 */
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    /* Convert the virtual clock to CPU cycles at ARM_CPU_FREQ. */
    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
941
/* PMCR write hook. The pmccntr_sync() calls bracket the update: the
 * first freezes the counter under the old PMCR settings, the second
 * re-bases it under the new ones, so changes to the D/E bits don't
 * corrupt the running count.
 */
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}
958
/* PMCCNTR read hook. While enabled, c15_ccnt stores (tick_base - count),
 * so the current count is recovered as current_ticks - c15_ccnt; while
 * disabled, c15_ccnt is the count itself (see pmccntr_sync).
 */
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
977
/* PMCCNTR write hook; inverse of pmccntr_read. While enabled, storing
 * (current_ticks - value) makes a subsequent read yield 'value' plus
 * whatever has elapsed since.
 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
998
999 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1000 uint64_t value)
1001 {
1002 uint64_t cur_val = pmccntr_read(env, NULL);
1003
1004 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1005 }
1006
1007 #else /* CONFIG_USER_ONLY */
1008
/* User-mode emulation has no virtual-clock-backed cycle counter,
 * so syncing is a no-op.
 */
void pmccntr_sync(CPUARMState *env)
{
}
1012
1013 #endif
1014
1015 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1016 uint64_t value)
1017 {
1018 pmccntr_sync(env);
1019 env->cp15.pmccfiltr_el0 = value & 0x7E000000;
1020 pmccntr_sync(env);
1021 }
1022
1023 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1024 uint64_t value)
1025 {
1026 value &= (1 << 31);
1027 env->cp15.c9_pmcnten |= value;
1028 }
1029
1030 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1031 uint64_t value)
1032 {
1033 value &= (1 << 31);
1034 env->cp15.c9_pmcnten &= ~value;
1035 }
1036
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* PMOVSR is write-one-to-clear: each set bit in the written value
     * clears the corresponding overflow flag.
     */
    env->cp15.c9_pmovsr &= ~value;
}
1042
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only the low 8 bits (the event number field) are stored */
    env->cp15.c9_pmxevtyper = value & 0xff;
}
1048
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Only PMUSERENR.EN (bit 0) is implemented */
    env->cp15.c9_pmuserenr = value & 1;
}
1054
1055 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1056 uint64_t value)
1057 {
1058 /* We have no event counters so only the C bit can be changed */
1059 value &= (1 << 31);
1060 env->cp15.c9_pminten |= value;
1061 }
1062
1063 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1064 uint64_t value)
1065 {
1066 value &= (1 << 31);
1067 env->cp15.c9_pminten &= ~value;
1068 }
1069
/* Write the vector base address register, masking the low bits */
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
1081
1082 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1083 {
1084 /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1085 * For bits that vary between AArch32/64, code needs to check the
1086 * current execution mode before directly using the feature bit.
1087 */
1088 uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
1089
1090 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1091 valid_mask &= ~SCR_HCE;
1092
1093 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1094 * supported if EL2 exists. The bit is UNK/SBZP when
1095 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1096 * when EL2 is unavailable.
1097 * On ARMv8, this bit is always available.
1098 */
1099 if (arm_feature(env, ARM_FEATURE_V7) &&
1100 !arm_feature(env, ARM_FEATURE_V8)) {
1101 valid_mask &= ~SCR_SMD;
1102 }
1103 }
1104
1105 /* Clear all-context RES0 bits. */
1106 value &= valid_mask;
1107 raw_write(env, ri, value);
1108 }
1109
1110 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1111 {
1112 ARMCPU *cpu = arm_env_get_cpu(env);
1113
1114 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1115 * bank
1116 */
1117 uint32_t index = A32_BANKED_REG_GET(env, csselr,
1118 ri->secure & ARM_CP_SECSTATE_S);
1119
1120 return cpu->ccsidr[index];
1121 }
1122
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Only the bottom four bits of CSSELR are kept; the rest are RES0 */
    raw_write(env, ri, value & 0xf);
}
1128
1129 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1130 {
1131 CPUState *cs = ENV_GET_CPU(env);
1132 uint64_t ret = 0;
1133
1134 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1135 ret |= CPSR_I;
1136 }
1137 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1138 ret |= CPSR_F;
1139 }
1140 /* External aborts are not possible in QEMU so A bit is always clear */
1141 return ret;
1142 }
1143
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     *  For the cases controlled by PMUSERENR we must set .access to PL0_RW
     *  or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
#ifndef CONFIG_USER_ONLY
    /* The cycle counter needs QEMU_CLOCK_VIRTUAL, which only exists in
     * system emulation; user-mode builds omit PMCCNTR entirely.
     */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    /* Cache ID registers: CCSIDR is indexed via the banked CSSELR */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* ISR: computed on read from the pending interrupt state */
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
1334
/* Registers added on v7 multiprocessor implementations */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1349
1350 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1351 uint64_t value)
1352 {
1353 value &= 1;
1354 env->teecr = value;
1355 }
1356
1357 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1358 bool isread)
1359 {
1360 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1361 return CP_ACCESS_TRAP;
1362 }
1363 return CP_ACCESS_OK;
1364 }
1365
/* ThumbEE (T2EE) registers, in coprocessor 14 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    /* TEEHBR is PL0-accessible unless gated by teehbr_access() */
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1376
/* Software thread-ID registers (TPIDR*). The 32-bit views are banked
 * by security state; the AArch64 views use the unified fields.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1408
1409 #ifndef CONFIG_USER_ONLY
1410
1411 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1412 bool isread)
1413 {
1414 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1415 * Writable only at the highest implemented exception level.
1416 */
1417 int el = arm_current_el(env);
1418
1419 switch (el) {
1420 case 0:
1421 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1422 return CP_ACCESS_TRAP;
1423 }
1424 break;
1425 case 1:
1426 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1427 arm_is_secure_below_el3(env)) {
1428 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1429 return CP_ACCESS_TRAP_UNCATEGORIZED;
1430 }
1431 break;
1432 case 2:
1433 case 3:
1434 break;
1435 }
1436
1437 if (!isread && el < arm_highest_el(env)) {
1438 return CP_ACCESS_TRAP_UNCATEGORIZED;
1439 }
1440
1441 return CP_ACCESS_OK;
1442 }
1443
1444 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1445 bool isread)
1446 {
1447 unsigned int cur_el = arm_current_el(env);
1448 bool secure = arm_is_secure(env);
1449
1450 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1451 if (cur_el == 0 &&
1452 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1453 return CP_ACCESS_TRAP;
1454 }
1455
1456 if (arm_feature(env, ARM_FEATURE_EL2) &&
1457 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1458 !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1459 return CP_ACCESS_TRAP_EL2;
1460 }
1461 return CP_ACCESS_OK;
1462 }
1463
1464 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1465 bool isread)
1466 {
1467 unsigned int cur_el = arm_current_el(env);
1468 bool secure = arm_is_secure(env);
1469
1470 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1471 * EL0[PV]TEN is zero.
1472 */
1473 if (cur_el == 0 &&
1474 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1475 return CP_ACCESS_TRAP;
1476 }
1477
1478 if (arm_feature(env, ARM_FEATURE_EL2) &&
1479 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1480 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1481 return CP_ACCESS_TRAP_EL2;
1482 }
1483 return CP_ACCESS_OK;
1484 }
1485
/* accessfn adapters: bind the physical or virtual timer index into the
 * generic counter/timer access checks above.
 */
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
1511
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            /* Non-secure EL1 never sees the secure timer */
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            /* Secure EL1 access is gated by SCR_EL3.ST */
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        /* Never accessible from EL0 or EL2 */
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
1538
/* Current system counter value: the virtual clock in nanoseconds,
 * scaled down by GTIMER_SCALE to counter ticks.
 */
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
1543
/* Bring one generic timer's ISTATUS bit, interrupt output and backing
 * QEMUTimer into line with its current cval/ctl and the counter value.
 * Called from the register write paths and from the QEMUTimer expiry
 * callbacks.
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        /* ISTATUS is bit 2 of CNT*_CTL */
        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* IRQ asserted when the condition holds and IMASK (bit 1) clear */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
1590
/* Register reset helper: stop the backing QEMUTimer for this timer */
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}
1598
/* CNTPCT: the raw physical counter value */
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

/* CNTVCT: the physical counter minus the virtual offset CNTVOFF_EL2 */
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
1608
/* CNT*_CVAL write: store the new compare value and reprogram the
 * backing timer to match.
 */
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1617
/* CNT*_TVAL read: the 32-bit downcounting view, i.e. the distance from
 * the current (offset-adjusted) counter value to the compare value.
 */
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    /* Only the virtual timer counts relative to CNTVOFF_EL2 */
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
1626
/* CNT*_TVAL write: convert the signed 32-bit downcount (sign-extended
 * by sextract64) into an absolute compare value and re-arm the timer.
 */
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    /* Only the virtual timer counts relative to CNTVOFF_EL2 */
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1638
/* CNT*_CTL write: only ENABLE (bit 0) and IMASK (bit 1) are written;
 * ISTATUS (bit 2) is read-only and maintained by gt_recalc_timer().
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
1661
/* Thin wrappers binding the EL1 physical timer (GTIMER_PHYS) index
 * into the generic timer helpers above.
 */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
1689
/* Thin wrappers binding the virtual timer (GTIMER_VIRT) index into the
 * generic timer helpers above.
 */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
1717
/* CNTVOFF_EL2 write: store the new virtual counter offset and
 * reprogram the virtual timer, whose deadline depends on the offset.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
1727
/* Thin wrappers binding the EL2 (hyp) timer (GTIMER_HYP) index into
 * the generic timer helpers above.
 */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
1755
/* Thin wrappers binding the secure physical timer (GTIMER_SEC) index
 * into the generic timer helpers above.
 */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1783
/* QEMUTimer expiry callbacks: on expiry, recompute ISTATUS / the
 * interrupt line and re-arm the timer for its next transition.
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

/* Virtual timer expiry */
void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

/* EL2 (hyp) timer expiry */
void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

/* Secure physical timer expiry */
void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
1811
1812 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1813 /* Note that CNTFRQ is purely reads-as-written for the benefit
1814 * of software; writing it doesn't actually change the timer frequency.
1815 * Our reset value matches the fixed frequency we implement the timer at.
1816 */
1817 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1818 .type = ARM_CP_ALIAS,
1819 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1820 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1821 },
1822 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1823 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1824 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1825 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1826 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1827 },
1828 /* overall control: mostly access permissions */
1829 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1830 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1831 .access = PL1_RW,
1832 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1833 .resetvalue = 0,
1834 },
1835 /* per-timer control */
1836 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1837 .secure = ARM_CP_SECSTATE_NS,
1838 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1839 .accessfn = gt_ptimer_access,
1840 .fieldoffset = offsetoflow32(CPUARMState,
1841 cp15.c14_timer[GTIMER_PHYS].ctl),
1842 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1843 },
1844 { .name = "CNTP_CTL(S)",
1845 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1846 .secure = ARM_CP_SECSTATE_S,
1847 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1848 .accessfn = gt_ptimer_access,
1849 .fieldoffset = offsetoflow32(CPUARMState,
1850 cp15.c14_timer[GTIMER_SEC].ctl),
1851 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1852 },
1853 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1854 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1855 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1856 .accessfn = gt_ptimer_access,
1857 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1858 .resetvalue = 0,
1859 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1860 },
1861 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1862 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1863 .accessfn = gt_vtimer_access,
1864 .fieldoffset = offsetoflow32(CPUARMState,
1865 cp15.c14_timer[GTIMER_VIRT].ctl),
1866 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1867 },
1868 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1869 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1870 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1871 .accessfn = gt_vtimer_access,
1872 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1873 .resetvalue = 0,
1874 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1875 },
1876 /* TimerValue views: a 32 bit downcounting view of the underlying state */
1877 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1878 .secure = ARM_CP_SECSTATE_NS,
1879 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1880 .accessfn = gt_ptimer_access,
1881 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1882 },
1883 { .name = "CNTP_TVAL(S)",
1884 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1885 .secure = ARM_CP_SECSTATE_S,
1886 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1887 .accessfn = gt_ptimer_access,
1888 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1889 },
1890 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1891 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1892 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1893 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1894 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1895 },
1896 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1897 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1898 .accessfn = gt_vtimer_access,
1899 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1900 },
1901 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1902 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1903 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1904 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
1905 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1906 },
1907 /* The counter itself */
1908 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1909 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1910 .accessfn = gt_pct_access,
1911 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1912 },
1913 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1914 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1915 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1916 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
1917 },
1918 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1919 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1920 .accessfn = gt_vct_access,
1921 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
1922 },
1923 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1924 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1925 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1926 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
1927 },
1928 /* Comparison value, indicating when the timer goes off */
1929 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1930 .secure = ARM_CP_SECSTATE_NS,
1931 .access = PL1_RW | PL0_R,
1932 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1933 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1934 .accessfn = gt_ptimer_access,
1935 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1936 },
1937 { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
1938 .secure = ARM_CP_SECSTATE_S,
1939 .access = PL1_RW | PL0_R,
1940 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1941 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1942 .accessfn = gt_ptimer_access,
1943 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1944 },
1945 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1946 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1947 .access = PL1_RW | PL0_R,
1948 .type = ARM_CP_IO,
1949 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1950 .resetvalue = 0, .accessfn = gt_ptimer_access,
1951 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1952 },
1953 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1954 .access = PL1_RW | PL0_R,
1955 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1956 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1957 .accessfn = gt_vtimer_access,
1958 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1959 },
1960 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1961 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1962 .access = PL1_RW | PL0_R,
1963 .type = ARM_CP_IO,
1964 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1965 .resetvalue = 0, .accessfn = gt_vtimer_access,
1966 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1967 },
1968 /* Secure timer -- this is actually restricted to only EL3
1969 * and configurably Secure-EL1 via the accessfn.
1970 */
1971 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
1972 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
1973 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
1974 .accessfn = gt_stimer_access,
1975 .readfn = gt_sec_tval_read,
1976 .writefn = gt_sec_tval_write,
1977 .resetfn = gt_sec_timer_reset,
1978 },
1979 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
1980 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
1981 .type = ARM_CP_IO, .access = PL1_RW,
1982 .accessfn = gt_stimer_access,
1983 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
1984 .resetvalue = 0,
1985 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1986 },
1987 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
1988 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
1989 .type = ARM_CP_IO, .access = PL1_RW,
1990 .accessfn = gt_stimer_access,
1991 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1992 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1993 },
1994 REGINFO_SENTINEL
1995 };
1996
1997 #else
1998 /* In user-mode none of the generic timer registers are accessible,
1999 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2000 * so instead just don't register any of them.
2001 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Intentionally empty: only the sentinel, so nothing is registered. */
    REGINFO_SENTINEL
};
2005
2006 #endif
2007
2008 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2009 {
2010 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2011 raw_write(env, ri, value);
2012 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2013 raw_write(env, ri, value & 0xfffff6ff);
2014 } else {
2015 raw_write(env, ri, value & 0xfffff1ff);
2016 }
2017 }
2018
2019 #ifndef CONFIG_USER_ONLY
2020 /* get_phys_addr() isn't present for user-mode-only targets */
2021
2022 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2023 bool isread)
2024 {
2025 if (ri->opc2 & 4) {
2026 /* The ATS12NSO* operations must trap to EL3 if executed in
2027 * Secure EL1 (which can only happen if EL3 is AArch64).
2028 * They are simply UNDEF if executed from NS EL1.
2029 * They function normally from EL2 or EL3.
2030 */
2031 if (arm_current_el(env) == 1) {
2032 if (arm_is_secure_below_el3(env)) {
2033 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2034 }
2035 return CP_ACCESS_TRAP_UNCATEGORIZED;
2036 }
2037 }
2038 return CP_ACCESS_OK;
2039 }
2040
/* Perform an address translation for an AT* operation: translate the
 * virtual address 'value' in the regime 'mmu_idx' and return the result
 * encoded as a PAR value (64-bit long-descriptor format if extended
 * addresses are enabled, otherwise 32-bit short-descriptor format).
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             int access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    uint32_t fsr;
    bool ret;       /* true if the translation faulted */
    uint64_t par64;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx,
                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
    if (extended_addresses_enabled(env)) {
        /* fsr is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersections report only PA[31:24] plus the SS bit */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            /* Repack the short-format FSR into the PAR fault fields:
             * FS[4] and LPAE-style bits move down, FS[3:0] shift up by 1,
             * and the F bit is set.
             */
            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
2098
/* Handle the AArch32 ATS* operations: pick the translation regime implied
 * by the opcode and current exception level, run the translation, and
 * deposit the result in the (banked) PAR.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    int access_type = ri->opc2 & 1;     /* 0 = read (ATS..R), 1 = write (ATS..W) */
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            /* From EL2, "current state PL1" means the NS EL1 regime */
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    /* Result goes into whichever PAR bank matches the current security state */
    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
2156
2157 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2158 uint64_t value)
2159 {
2160 int access_type = ri->opc2 & 1;
2161 uint64_t par64;
2162
2163 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2164
2165 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2166 }
2167
2168 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2169 bool isread)
2170 {
2171 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2172 return CP_ACCESS_TRAP;
2173 }
2174 return CP_ACCESS_OK;
2175 }
2176
/* Handle the AArch64 AT S1E*/S12E* operations: select the translation
 * regime from opc1/opc2 and the current security state, run the
 * translation, and store the result in PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    int access_type = ri->opc2 & 1;   /* 0 = read (..R), 1 = write (..W) */
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* opc1 distinguishes the target EL for the stage 1 EL1/EL2/EL3 ops */
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        /* Stage 2 only applies in Non-secure state */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
2215 #endif
2216
/* VA-to-PA translation registers: PAR (result) and the ATS* operations. */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    /* PAR is banked between Secure and Non-secure state */
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
2231
/* Return basic MPU access permission bits: compress the extended format
 * (one 4-bit field per region) into the basic format by keeping the low
 * two bits of each of the eight fields.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 4)) & 3) << (region * 2);
    }
    return ret;
}
2246
/* Pad basic MPU access permission bits to extended format: spread each
 * 2-bit region field out to its own 4-bit field (upper two bits zero).
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 2)) & 3) << (region * 4);
    }
    return ret;
}
2261
/* Writes of the basic data AP register are stored in extended format. */
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}
2267
/* Reads of the basic data AP register compress the stored extended format. */
static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}
2272
/* Writes of the basic insn AP register are stored in extended format. */
static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}
2278
/* Reads of the basic insn AP register compress the stored extended format. */
static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
2283
2284 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2285 {
2286 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2287
2288 if (!u32p) {
2289 return 0;
2290 }
2291
2292 u32p += env->cp15.c6_rgnr;
2293 return *u32p;
2294 }
2295
2296 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2297 uint64_t value)
2298 {
2299 ARMCPU *cpu = arm_env_get_cpu(env);
2300 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2301
2302 if (!u32p) {
2303 return;
2304 }
2305
2306 u32p += env->cp15.c6_rgnr;
2307 tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
2308 *u32p = value;
2309 }
2310
2311 static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2312 {
2313 ARMCPU *cpu = arm_env_get_cpu(env);
2314 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2315
2316 if (!u32p) {
2317 return;
2318 }
2319
2320 memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2321 }
2322
2323 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2324 uint64_t value)
2325 {
2326 ARMCPU *cpu = arm_env_get_cpu(env);
2327 uint32_t nrgs = cpu->pmsav7_dregion;
2328
2329 if (value >= nrgs) {
2330 qemu_log_mask(LOG_GUEST_ERROR,
2331 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2332 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2333 return;
2334 }
2335
2336 raw_write(env, ri, value);
2337 }
2338
/* PMSAv7 MPU registers: the DR* region registers are all indexed by RGNR
 * via the pmsav7_read/write/reset helpers.
 */
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
    /* Region number register: selects which region the DR* regs access */
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
      .writefn = pmsav7_rgnr_write },
    REGINFO_SENTINEL
};
2358
/* PMSAv5 MPU registers.  DATA_AP/INSN_AP present the basic 2-bit-per-region
 * view of the access permissions; DATA_EXT_AP/INSN_EXT_AP are the raw
 * extended 4-bit-per-region storage (same underlying state fields).
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
2409
/* Raw (migration/register-write) handler for TTBCR: sanitize the value for
 * the implemented architecture version and recompute the cached translation
 * table masks.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);   /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
2442
2443 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2444 uint64_t value)
2445 {
2446 ARMCPU *cpu = arm_env_get_cpu(env);
2447
2448 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2449 /* With LPAE the TTBCR could result in a change of ASID
2450 * via the TTBCR.A1 bit, so do a TLB flush.
2451 */
2452 tlb_flush(CPU(cpu), 1);
2453 }
2454 vmsa_ttbcr_raw_write(env, ri, value);
2455 }
2456
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.  The mask values match raw_tcr == 0 (i.e. N == 0:
     * no address-range split, full 16KB table base mask).
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
2468
2469 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2470 uint64_t value)
2471 {
2472 ARMCPU *cpu = arm_env_get_cpu(env);
2473 TCR *tcr = raw_ptr(env, ri);
2474
2475 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2476 tlb_flush(CPU(cpu), 1);
2477 tcr->raw_tcr = value;
2478 }
2479
2480 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2481 uint64_t value)
2482 {
2483 /* 64 bit accesses to the TTBRs can change the ASID and so we
2484 * must flush the TLB.
2485 */
2486 if (cpreg_field_is_64bit(ri)) {
2487 ARMCPU *cpu = arm_env_get_cpu(env);
2488
2489 tlb_flush(CPU(cpu), 1);
2490 }
2491 raw_write(env, ri, value);
2492 }
2493
/* VTTBR_EL2 write handler. */
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.
     * Only an actual change of value requires the flush; rewriting the
     * same value is a no-op.
     */
    if (raw_read(env, ri) != value) {
        /* Flush all TLB entries that could be tagged with the old VMID:
         * both combined stage 1+2 indexes and the stage 2 index itself.
         */
        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                            ARMMMUIdx_S2NS, -1);
        raw_write(env, ri, value);
    }
}
2507
/* Fault status/address registers common to VMSA and PMSA CPUs; the
 * AArch32 ones are banked between Secure and Non-secure state.
 */
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
2527
/* VMSA (MMU) registers: translation table bases and control. */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /* AArch32 TTBCR is an alias of the low 32 bits of the banked TCR */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
2555
2556 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2557 uint64_t value)
2558 {
2559 env->cp15.c15_ticonfig = value & 0xe7;
2560 /* The OS_TYPE bit in this register changes the reported CPUID! */
2561 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2562 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2563 }
2564
/* OMAP THREADID write handler: only the low 16 bits are stored. */
static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}
2570
/* OMAP-specific WFI-via-cp15-write: halt the CPU like a WFI instruction. */
static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}
2577
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     * Since we model no cache, simply restore the "nothing dirty" values.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
2587
/* OMAP/TI925T implementation-defined cp15 registers. */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    /* Unnamed crn=15 crm=0 register: modelled as a NOP */
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Status register: reads as zero, writes trigger WFI behaviour */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2627
/* XScale CPAR write handler: only bits [13:0] are writable. */
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}
2633
/* XScale implementation-defined cp15 registers. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2660
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2674
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2682
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2703
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2716
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses: reads-as-zero, writes-ignored */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2725
2726 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2727 {
2728 ARMCPU *cpu = arm_env_get_cpu(env);
2729 unsigned int cur_el = arm_current_el(env);
2730 bool secure = arm_is_secure(env);
2731
2732 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2733 return env->cp15.vpidr_el2;
2734 }
2735 return raw_read(env, ri);
2736 }
2737
2738 static uint64_t mpidr_read_val(CPUARMState *env)
2739 {
2740 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2741 uint64_t mpidr = cpu->mp_affinity;
2742
2743 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2744 mpidr |= (1U << 31);
2745 /* Cores which are uniprocessor (non-coherent)
2746 * but still implement the MP extensions set
2747 * bit 30. (For instance, Cortex-R5).
2748 */
2749 if (cpu->mp_is_up) {
2750 mpidr |= (1u << 30);
2751 }
2752 }
2753 return mpidr;
2754 }
2755
2756 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2757 {
2758 unsigned int cur_el = arm_current_el(env);
2759 bool secure = arm_is_secure(env);
2760
2761 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2762 return env->cp15.vmpidr_el2;
2763 }
2764 return mpidr_read_val(env);
2765 }
2766
/* MPIDR / MPIDR_EL1: value is computed at read time (affinity plus
 * feature bits), so it is marked NO_RAW with a readfn.
 */
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2773
/* Registers introduced with LPAE: AMAIR is RAZ/WI, and the 64-bit
 * (MRRC/MCRR) views of PAR and TTBR0/1 are banked secure/non-secure.
 */
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
2800
2801 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2802 {
2803 return vfp_get_fpcr(env);
2804 }
2805
2806 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2807 uint64_t value)
2808 {
2809 vfp_set_fpcr(env, value);
2810 }
2811
2812 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2813 {
2814 return vfp_get_fpsr(env);
2815 }
2816
2817 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2818 uint64_t value)
2819 {
2820 vfp_set_fpsr(env, value);
2821 }
2822
2823 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2824 bool isread)
2825 {
2826 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2827 return CP_ACCESS_TRAP;
2828 }
2829 return CP_ACCESS_OK;
2830 }
2831
2832 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2833 uint64_t value)
2834 {
2835 env->daif = value & PSTATE_DAIF;
2836 }
2837
2838 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2839 const ARMCPRegInfo *ri,
2840 bool isread)
2841 {
2842 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2843 * SCTLR_EL1.UCI is set.
2844 */
2845 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2846 return CP_ACCESS_TRAP;
2847 }
2848 return CP_ACCESS_OK;
2849 }
2850
2851 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2852 * Page D4-1736 (DDI0487A.b)
2853 */
2854
2855 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2856 uint64_t value)
2857 {
2858 ARMCPU *cpu = arm_env_get_cpu(env);
2859 CPUState *cs = CPU(cpu);
2860
2861 if (arm_is_secure_below_el3(env)) {
2862 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2863 } else {
2864 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2865 }
2866 }
2867
2868 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2869 uint64_t value)
2870 {
2871 bool sec = arm_is_secure_below_el3(env);
2872 CPUState *other_cs;
2873
2874 CPU_FOREACH(other_cs) {
2875 if (sec) {
2876 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2877 } else {
2878 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2879 ARMMMUIdx_S12NSE0, -1);
2880 }
2881 }
2882 }
2883
2884 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2885 uint64_t value)
2886 {
2887 /* Note that the 'ALL' scope must invalidate both stage 1 and
2888 * stage 2 translations, whereas most other scopes only invalidate
2889 * stage 1 translations.
2890 */
2891 ARMCPU *cpu = arm_env_get_cpu(env);
2892 CPUState *cs = CPU(cpu);
2893
2894 if (arm_is_secure_below_el3(env)) {
2895 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2896 } else {
2897 if (arm_feature(env, ARM_FEATURE_EL2)) {
2898 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2899 ARMMMUIdx_S2NS, -1);
2900 } else {
2901 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2902 }
2903 }
2904 }
2905
2906 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2907 uint64_t value)
2908 {
2909 ARMCPU *cpu = arm_env_get_cpu(env);
2910 CPUState *cs = CPU(cpu);
2911
2912 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
2913 }
2914
2915 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2916 uint64_t value)
2917 {
2918 ARMCPU *cpu = arm_env_get_cpu(env);
2919 CPUState *cs = CPU(cpu);
2920
2921 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
2922 }
2923
2924 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2925 uint64_t value)
2926 {
2927 /* Note that the 'ALL' scope must invalidate both stage 1 and
2928 * stage 2 translations, whereas most other scopes only invalidate
2929 * stage 1 translations.
2930 */
2931 bool sec = arm_is_secure_below_el3(env);
2932 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2933 CPUState *other_cs;
2934
2935 CPU_FOREACH(other_cs) {
2936 if (sec) {
2937 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2938 } else if (has_el2) {
2939 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2940 ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
2941 } else {
2942 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2943 ARMMMUIdx_S12NSE0, -1);
2944 }
2945 }
2946 }
2947
2948 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2949 uint64_t value)
2950 {
2951 CPUState *other_cs;
2952
2953 CPU_FOREACH(other_cs) {
2954 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
2955 }
2956 }
2957
2958 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2959 uint64_t value)
2960 {
2961 CPUState *other_cs;
2962
2963 CPU_FOREACH(other_cs) {
2964 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
2965 }
2966 }
2967
2968 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2969 uint64_t value)
2970 {
2971 /* Invalidate by VA, EL1&0 (AArch64 version).
2972 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
2973 * since we don't support flush-for-specific-ASID-only or
2974 * flush-last-level-only.
2975 */
2976 ARMCPU *cpu = arm_env_get_cpu(env);
2977 CPUState *cs = CPU(cpu);
2978 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2979
2980 if (arm_is_secure_below_el3(env)) {
2981 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
2982 ARMMMUIdx_S1SE0, -1);
2983 } else {
2984 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
2985 ARMMMUIdx_S12NSE0, -1);
2986 }
2987 }
2988
2989 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2990 uint64_t value)
2991 {