target/arm/helper: make it clear the EC field is also in hex
[qemu.git] / target / arm / helper.c
1 #include "qemu/osdep.h"
2 #include "trace.h"
3 #include "cpu.h"
4 #include "internals.h"
5 #include "exec/gdbstub.h"
6 #include "exec/helper-proto.h"
7 #include "qemu/host-utils.h"
8 #include "sysemu/arch_init.h"
9 #include "sysemu/sysemu.h"
10 #include "qemu/bitops.h"
11 #include "qemu/crc32c.h"
12 #include "exec/exec-all.h"
13 #include "exec/cpu_ldst.h"
14 #include "arm_ldst.h"
15 #include <zlib.h> /* For crc32 */
16 #include "exec/semihost.h"
17 #include "sysemu/kvm.h"
18
19 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
20
21 #ifndef CONFIG_USER_ONLY
22 static bool get_phys_addr(CPUARMState *env, target_ulong address,
23 int access_type, ARMMMUIdx mmu_idx,
24 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
25 target_ulong *page_size, uint32_t *fsr,
26 ARMMMUFaultInfo *fi);
27
28 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
29 int access_type, ARMMMUIdx mmu_idx,
30 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
31 target_ulong *page_size_ptr, uint32_t *fsr,
32 ARMMMUFaultInfo *fi);
33
34 /* Definitions for the PMCCNTR and PMCR registers */
35 #define PMCRD 0x8
36 #define PMCRC 0x4
37 #define PMCRE 0x1
38 #endif
39
40 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
41 {
42 int nregs;
43
44 /* VFP data registers are always little-endian. */
45 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
46 if (reg < nregs) {
47 stfq_le_p(buf, env->vfp.regs[reg]);
48 return 8;
49 }
50 if (arm_feature(env, ARM_FEATURE_NEON)) {
51 /* Aliases for Q regs. */
52 nregs += 16;
53 if (reg < nregs) {
54 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
55 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
56 return 16;
57 }
58 }
59 switch (reg - nregs) {
60 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
61 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
62 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
63 }
64 return 0;
65 }
66
67 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
68 {
69 int nregs;
70
71 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
72 if (reg < nregs) {
73 env->vfp.regs[reg] = ldfq_le_p(buf);
74 return 8;
75 }
76 if (arm_feature(env, ARM_FEATURE_NEON)) {
77 nregs += 16;
78 if (reg < nregs) {
79 env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
80 env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
81 return 16;
82 }
83 }
84 switch (reg - nregs) {
85 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
86 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
87 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
88 }
89 return 0;
90 }
91
92 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
93 {
94 switch (reg) {
95 case 0 ... 31:
96 /* 128 bit FP register */
97 stfq_le_p(buf, env->vfp.regs[reg * 2]);
98 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
99 return 16;
100 case 32:
101 /* FPSR */
102 stl_p(buf, vfp_get_fpsr(env));
103 return 4;
104 case 33:
105 /* FPCR */
106 stl_p(buf, vfp_get_fpcr(env));
107 return 4;
108 default:
109 return 0;
110 }
111 }
112
113 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
114 {
115 switch (reg) {
116 case 0 ... 31:
117 /* 128 bit FP register */
118 env->vfp.regs[reg * 2] = ldfq_le_p(buf);
119 env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
120 return 16;
121 case 32:
122 /* FPSR */
123 vfp_set_fpsr(env, ldl_p(buf));
124 return 4;
125 case 33:
126 /* FPCR */
127 vfp_set_fpcr(env, ldl_p(buf));
128 return 4;
129 default:
130 return 0;
131 }
132 }
133
134 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
135 {
136 assert(ri->fieldoffset);
137 if (cpreg_field_is_64bit(ri)) {
138 return CPREG_FIELD64(env, ri);
139 } else {
140 return CPREG_FIELD32(env, ri);
141 }
142 }
143
144 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
145 uint64_t value)
146 {
147 assert(ri->fieldoffset);
148 if (cpreg_field_is_64bit(ri)) {
149 CPREG_FIELD64(env, ri) = value;
150 } else {
151 CPREG_FIELD32(env, ri) = value;
152 }
153 }
154
155 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
156 {
157 return (char *)env + ri->fieldoffset;
158 }
159
160 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
161 {
162 /* Raw read of a coprocessor register (as needed for migration, etc). */
163 if (ri->type & ARM_CP_CONST) {
164 return ri->resetvalue;
165 } else if (ri->raw_readfn) {
166 return ri->raw_readfn(env, ri);
167 } else if (ri->readfn) {
168 return ri->readfn(env, ri);
169 } else {
170 return raw_read(env, ri);
171 }
172 }
173
174 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
175 uint64_t v)
176 {
177 /* Raw write of a coprocessor register (as needed for migration, etc).
178 * Note that constant registers are treated as write-ignored; the
179 * caller should check for success by whether a readback gives the
180 * value written.
181 */
182 if (ri->type & ARM_CP_CONST) {
183 return;
184 } else if (ri->raw_writefn) {
185 ri->raw_writefn(env, ri, v);
186 } else if (ri->writefn) {
187 ri->writefn(env, ri, v);
188 } else {
189 raw_write(env, ri, v);
190 }
191 }
192
193 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
194 {
195 /* Return true if the regdef would cause an assertion if you called
196 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
197 * program bug for it not to have the NO_RAW flag).
198 * NB that returning false here doesn't necessarily mean that calling
199 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
200 * read/write access functions which are safe for raw use" from "has
201 * read/write access functions which have side effects but has forgotten
202 * to provide raw access functions".
203 * The tests here line up with the conditions in read/write_raw_cp_reg()
204 * and assertions in raw_read()/raw_write().
205 */
206 if ((ri->type & ARM_CP_CONST) ||
207 ri->fieldoffset ||
208 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
209 return false;
210 }
211 return true;
212 }
213
214 bool write_cpustate_to_list(ARMCPU *cpu)
215 {
216 /* Write the coprocessor state from cpu->env to the (index,value) list. */
217 int i;
218 bool ok = true;
219
220 for (i = 0; i < cpu->cpreg_array_len; i++) {
221 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
222 const ARMCPRegInfo *ri;
223
224 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
225 if (!ri) {
226 ok = false;
227 continue;
228 }
229 if (ri->type & ARM_CP_NO_RAW) {
230 continue;
231 }
232 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
233 }
234 return ok;
235 }
236
237 bool write_list_to_cpustate(ARMCPU *cpu)
238 {
239 int i;
240 bool ok = true;
241
242 for (i = 0; i < cpu->cpreg_array_len; i++) {
243 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
244 uint64_t v = cpu->cpreg_values[i];
245 const ARMCPRegInfo *ri;
246
247 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
248 if (!ri) {
249 ok = false;
250 continue;
251 }
252 if (ri->type & ARM_CP_NO_RAW) {
253 continue;
254 }
255 /* Write value and confirm it reads back as written
256 * (to catch read-only registers and partially read-only
257 * registers where the incoming migration value doesn't match)
258 */
259 write_raw_cp_reg(&cpu->env, ri, v);
260 if (read_raw_cp_reg(&cpu->env, ri) != v) {
261 ok = false;
262 }
263 }
264 return ok;
265 }
266
267 static void add_cpreg_to_list(gpointer key, gpointer opaque)
268 {
269 ARMCPU *cpu = opaque;
270 uint64_t regidx;
271 const ARMCPRegInfo *ri;
272
273 regidx = *(uint32_t *)key;
274 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
275
276 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
277 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
278 /* The value array need not be initialized at this point */
279 cpu->cpreg_array_len++;
280 }
281 }
282
283 static void count_cpreg(gpointer key, gpointer opaque)
284 {
285 ARMCPU *cpu = opaque;
286 uint64_t regidx;
287 const ARMCPRegInfo *ri;
288
289 regidx = *(uint32_t *)key;
290 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
291
292 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
293 cpu->cpreg_array_len++;
294 }
295 }
296
297 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
298 {
299 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
300 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
301
302 if (aidx > bidx) {
303 return 1;
304 }
305 if (aidx < bidx) {
306 return -1;
307 }
308 return 0;
309 }
310
311 void init_cpreg_list(ARMCPU *cpu)
312 {
313 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
314 * Note that we require cpreg_tuples[] to be sorted by key ID.
315 */
316 GList *keys;
317 int arraylen;
318
319 keys = g_hash_table_get_keys(cpu->cp_regs);
320 keys = g_list_sort(keys, cpreg_key_compare);
321
322 cpu->cpreg_array_len = 0;
323
324 g_list_foreach(keys, count_cpreg, cpu);
325
326 arraylen = cpu->cpreg_array_len;
327 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
328 cpu->cpreg_values = g_new(uint64_t, arraylen);
329 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
330 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
331 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
332 cpu->cpreg_array_len = 0;
333
334 g_list_foreach(keys, add_cpreg_to_list, cpu);
335
336 assert(cpu->cpreg_array_len == arraylen);
337
338 g_list_free(keys);
339 }
340
341 /*
342 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
343 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
344 *
345 * access_el3_aa32ns: Used to check AArch32 register views.
346 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
347 */
348 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
349 const ARMCPRegInfo *ri,
350 bool isread)
351 {
352 bool secure = arm_is_secure_below_el3(env);
353
354 assert(!arm_el_is_aa64(env, 3));
355 if (secure) {
356 return CP_ACCESS_TRAP_UNCATEGORIZED;
357 }
358 return CP_ACCESS_OK;
359 }
360
361 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
362 const ARMCPRegInfo *ri,
363 bool isread)
364 {
365 if (!arm_el_is_aa64(env, 3)) {
366 return access_el3_aa32ns(env, ri, isread);
367 }
368 return CP_ACCESS_OK;
369 }
370
371 /* Some secure-only AArch32 registers trap to EL3 if used from
372 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
373 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
374 * We assume that the .access field is set to PL1_RW.
375 */
376 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
377 const ARMCPRegInfo *ri,
378 bool isread)
379 {
380 if (arm_current_el(env) == 3) {
381 return CP_ACCESS_OK;
382 }
383 if (arm_is_secure_below_el3(env)) {
384 return CP_ACCESS_TRAP_EL3;
385 }
386 /* This will be EL1 NS and EL2 NS, which just UNDEF */
387 return CP_ACCESS_TRAP_UNCATEGORIZED;
388 }
389
390 /* Check for traps to "powerdown debug" registers, which are controlled
391 * by MDCR.TDOSA
392 */
393 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
394 bool isread)
395 {
396 int el = arm_current_el(env);
397
398 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
399 && !arm_is_secure_below_el3(env)) {
400 return CP_ACCESS_TRAP_EL2;
401 }
402 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
403 return CP_ACCESS_TRAP_EL3;
404 }
405 return CP_ACCESS_OK;
406 }
407
408 /* Check for traps to "debug ROM" registers, which are controlled
409 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
410 */
411 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
412 bool isread)
413 {
414 int el = arm_current_el(env);
415
416 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
417 && !arm_is_secure_below_el3(env)) {
418 return CP_ACCESS_TRAP_EL2;
419 }
420 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
421 return CP_ACCESS_TRAP_EL3;
422 }
423 return CP_ACCESS_OK;
424 }
425
426 /* Check for traps to general debug registers, which are controlled
427 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
428 */
429 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
430 bool isread)
431 {
432 int el = arm_current_el(env);
433
434 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
435 && !arm_is_secure_below_el3(env)) {
436 return CP_ACCESS_TRAP_EL2;
437 }
438 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
439 return CP_ACCESS_TRAP_EL3;
440 }
441 return CP_ACCESS_OK;
442 }
443
444 /* Check for traps to performance monitor registers, which are controlled
445 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
446 */
447 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
448 bool isread)
449 {
450 int el = arm_current_el(env);
451
452 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
453 && !arm_is_secure_below_el3(env)) {
454 return CP_ACCESS_TRAP_EL2;
455 }
456 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
457 return CP_ACCESS_TRAP_EL3;
458 }
459 return CP_ACCESS_OK;
460 }
461
462 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
463 {
464 ARMCPU *cpu = arm_env_get_cpu(env);
465
466 raw_write(env, ri, value);
467 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
468 }
469
470 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
471 {
472 ARMCPU *cpu = arm_env_get_cpu(env);
473
474 if (raw_read(env, ri) != value) {
475 /* Unlike real hardware the qemu TLB uses virtual addresses,
476 * not modified virtual addresses, so this causes a TLB flush.
477 */
478 tlb_flush(CPU(cpu));
479 raw_write(env, ri, value);
480 }
481 }
482
483 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
484 uint64_t value)
485 {
486 ARMCPU *cpu = arm_env_get_cpu(env);
487
488 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
489 && !extended_addresses_enabled(env)) {
490 /* For VMSA (when not using the LPAE long descriptor page table
491 * format) this register includes the ASID, so do a TLB flush.
492 * For PMSA it is purely a process ID and no action is needed.
493 */
494 tlb_flush(CPU(cpu));
495 }
496 raw_write(env, ri, value);
497 }
498
499 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
500 uint64_t value)
501 {
502 /* Invalidate all (TLBIALL) */
503 ARMCPU *cpu = arm_env_get_cpu(env);
504
505 tlb_flush(CPU(cpu));
506 }
507
508 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
509 uint64_t value)
510 {
511 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
512 ARMCPU *cpu = arm_env_get_cpu(env);
513
514 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
515 }
516
517 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
518 uint64_t value)
519 {
520 /* Invalidate by ASID (TLBIASID) */
521 ARMCPU *cpu = arm_env_get_cpu(env);
522
523 tlb_flush(CPU(cpu));
524 }
525
526 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
527 uint64_t value)
528 {
529 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
530 ARMCPU *cpu = arm_env_get_cpu(env);
531
532 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
533 }
534
535 /* IS variants of TLB operations must affect all cores */
536 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
537 uint64_t value)
538 {
539 CPUState *cs = ENV_GET_CPU(env);
540
541 tlb_flush_all_cpus_synced(cs);
542 }
543
544 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
545 uint64_t value)
546 {
547 CPUState *cs = ENV_GET_CPU(env);
548
549 tlb_flush_all_cpus_synced(cs);
550 }
551
552 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
553 uint64_t value)
554 {
555 CPUState *cs = ENV_GET_CPU(env);
556
557 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
558 }
559
560 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
561 uint64_t value)
562 {
563 CPUState *cs = ENV_GET_CPU(env);
564
565 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
566 }
567
568 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
569 uint64_t value)
570 {
571 CPUState *cs = ENV_GET_CPU(env);
572
573 tlb_flush_by_mmuidx(cs,
574 (1 << ARMMMUIdx_S12NSE1) |
575 (1 << ARMMMUIdx_S12NSE0) |
576 (1 << ARMMMUIdx_S2NS));
577 }
578
579 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
580 uint64_t value)
581 {
582 CPUState *cs = ENV_GET_CPU(env);
583
584 tlb_flush_by_mmuidx_all_cpus_synced(cs,
585 (1 << ARMMMUIdx_S12NSE1) |
586 (1 << ARMMMUIdx_S12NSE0) |
587 (1 << ARMMMUIdx_S2NS));
588 }
589
590 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
591 uint64_t value)
592 {
593 /* Invalidate by IPA. This has to invalidate any structures that
594 * contain only stage 2 translation information, but does not need
595 * to apply to structures that contain combined stage 1 and stage 2
596 * translation information.
597 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
598 */
599 CPUState *cs = ENV_GET_CPU(env);
600 uint64_t pageaddr;
601
602 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
603 return;
604 }
605
606 pageaddr = sextract64(value << 12, 0, 40);
607
608 tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
609 }
610
611 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
612 uint64_t value)
613 {
614 CPUState *cs = ENV_GET_CPU(env);
615 uint64_t pageaddr;
616
617 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
618 return;
619 }
620
621 pageaddr = sextract64(value << 12, 0, 40);
622
623 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
624 (1 << ARMMMUIdx_S2NS));
625 }
626
627 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
628 uint64_t value)
629 {
630 CPUState *cs = ENV_GET_CPU(env);
631
632 tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
633 }
634
635 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
636 uint64_t value)
637 {
638 CPUState *cs = ENV_GET_CPU(env);
639
640 tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
641 }
642
643 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
644 uint64_t value)
645 {
646 CPUState *cs = ENV_GET_CPU(env);
647 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
648
649 tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
650 }
651
652 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
653 uint64_t value)
654 {
655 CPUState *cs = ENV_GET_CPU(env);
656 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
657
658 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
659 (1 << ARMMMUIdx_S1E2));
660 }
661
/* Registers defined for (almost) all CPUs: the FCSE ID and context ID
 * registers, each in banked secure/non-secure forms.
 */
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
696
/* Registers registered only for pre-v8 CPUs */
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
725
/* Registers registered only for pre-v6 CPUs */
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
734
/* Registers registered only for pre-v7 CPUs */
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory region remap registers: NOPed as QEMU ignores memory types */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
783
784 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
785 uint64_t value)
786 {
787 uint32_t mask = 0;
788
789 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
790 if (!arm_feature(env, ARM_FEATURE_V8)) {
791 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
792 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
793 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
794 */
795 if (arm_feature(env, ARM_FEATURE_VFP)) {
796 /* VFP coprocessor: cp10 & cp11 [23:20] */
797 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
798
799 if (!arm_feature(env, ARM_FEATURE_NEON)) {
800 /* ASEDIS [31] bit is RAO/WI */
801 value |= (1 << 31);
802 }
803
804 /* VFPv3 and upwards with NEON implement 32 double precision
805 * registers (D0-D31).
806 */
807 if (!arm_feature(env, ARM_FEATURE_NEON) ||
808 !arm_feature(env, ARM_FEATURE_VFP3)) {
809 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
810 value |= (1 << 30);
811 }
812 }
813 value &= mask;
814 }
815 env->cp15.cpacr_el1 = value;
816 }
817
818 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
819 bool isread)
820 {
821 if (arm_feature(env, ARM_FEATURE_V8)) {
822 /* Check if CPACR accesses are to be trapped to EL2 */
823 if (arm_current_el(env) == 1 &&
824 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
825 return CP_ACCESS_TRAP_EL2;
826 /* Check if CPACR accesses are to be trapped to EL3 */
827 } else if (arm_current_el(env) < 3 &&
828 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
829 return CP_ACCESS_TRAP_EL3;
830 }
831 }
832
833 return CP_ACCESS_OK;
834 }
835
836 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
837 bool isread)
838 {
839 /* Check if CPTR accesses are set to trap to EL3 */
840 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
841 return CP_ACCESS_TRAP_EL3;
842 }
843
844 return CP_ACCESS_OK;
845 }
846
/* Registers registered for v6 and later CPUs */
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    /* Barriers are NOPs under TCG's memory model */
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Instruction fault address register, banked secure/non-secure */
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
878
879 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
880 bool isread)
881 {
882 /* Performance monitor registers user accessibility is controlled
883 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
884 * trapping to EL2 or EL3 for other accesses.
885 */
886 int el = arm_current_el(env);
887
888 if (el == 0 && !env->cp15.c9_pmuserenr) {
889 return CP_ACCESS_TRAP;
890 }
891 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
892 && !arm_is_secure_below_el3(env)) {
893 return CP_ACCESS_TRAP_EL2;
894 }
895 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
896 return CP_ACCESS_TRAP_EL3;
897 }
898
899 return CP_ACCESS_OK;
900 }
901
902 #ifndef CONFIG_USER_ONLY
903
904 static inline bool arm_ccnt_enabled(CPUARMState *env)
905 {
906 /* This does not support checking PMCCFILTR_EL0 register */
907
908 if (!(env->cp15.c9_pmcr & PMCRE)) {
909 return false;
910 }
911
912 return true;
913 }
914
915 void pmccntr_sync(CPUARMState *env)
916 {
917 uint64_t temp_ticks;
918
919 temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
920 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
921
922 if (env->cp15.c9_pmcr & PMCRD) {
923 /* Increment once every 64 processor clock cycles */
924 temp_ticks /= 64;
925 }
926
927 if (arm_ccnt_enabled(env)) {
928 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
929 }
930 }
931
932 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
933 uint64_t value)
934 {
935 pmccntr_sync(env);
936
937 if (value & PMCRC) {
938 /* The counter has been reset */
939 env->cp15.c15_ccnt = 0;
940 }
941
942 /* only the DP, X, D and E bits are writable */
943 env->cp15.c9_pmcr &= ~0x39;
944 env->cp15.c9_pmcr |= (value & 0x39);
945
946 pmccntr_sync(env);
947 }
948
949 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
950 {
951 uint64_t total_ticks;
952
953 if (!arm_ccnt_enabled(env)) {
954 /* Counter is disabled, do not change value */
955 return env->cp15.c15_ccnt;
956 }
957
958 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
959 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
960
961 if (env->cp15.c9_pmcr & PMCRD) {
962 /* Increment once every 64 processor clock cycles */
963 total_ticks /= 64;
964 }
965 return total_ticks - env->cp15.c15_ccnt;
966 }
967
968 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
969 uint64_t value)
970 {
971 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
972 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
973 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
974 * accessed.
975 */
976 env->cp15.c9_pmselr = value & 0x1f;
977 }
978
979 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
980 uint64_t value)
981 {
982 uint64_t total_ticks;
983
984 if (!arm_ccnt_enabled(env)) {
985 /* Counter is disabled, set the absolute value */
986 env->cp15.c15_ccnt = value;
987 return;
988 }
989
990 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
991 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
992
993 if (env->cp15.c9_pmcr & PMCRD) {
994 /* Increment once every 64 processor clock cycles */
995 total_ticks /= 64;
996 }
997 env->cp15.c15_ccnt = total_ticks - value;
998 }
999
1000 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1001 uint64_t value)
1002 {
1003 uint64_t cur_val = pmccntr_read(env, NULL);
1004
1005 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1006 }
1007
#else /* CONFIG_USER_ONLY */

/* User-mode emulation has no PMU cycle-counter timing state to keep in
 * sync, so the sync hook is a no-op stub.
 */
void pmccntr_sync(CPUARMState *env)
{
}

#endif
1015
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* PMCCFILTR_EL0 controls filtering of the cycle counter. Sync the
     * counter around the update so the stored delta stays valid.
     * Only bits [30:25] are kept; all other bits are written as zero.
     */
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}
1023
1024 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1025 uint64_t value)
1026 {
1027 value &= (1 << 31);
1028 env->cp15.c9_pmcnten |= value;
1029 }
1030
1031 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1032 uint64_t value)
1033 {
1034 value &= (1 << 31);
1035 env->cp15.c9_pmcnten &= ~value;
1036 }
1037
1038 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1039 uint64_t value)
1040 {
1041 env->cp15.c9_pmovsr &= ~value;
1042 }
1043
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     * Selector 0x1f names the cycle counter, so the write is forwarded
     * to PMCCFILTR_EL0; all other selectors are ignored since we
     * implement no event counters.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}
1055
1056 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1057 {
1058 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1059 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1060 */
1061 if (env->cp15.c9_pmselr == 0x1f) {
1062 return env->cp15.pmccfiltr_el0;
1063 } else {
1064 return 0;
1065 }
1066 }
1067
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Only the EN bit (bit 0) of PMUSERENR is implemented */
    env->cp15.c9_pmuserenr = value & 1;
}
1073
1074 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1075 uint64_t value)
1076 {
1077 /* We have no event counters so only the C bit can be changed */
1078 value &= (1 << 31);
1079 env->cp15.c9_pminten |= value;
1080 }
1081
1082 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1083 uint64_t value)
1084 {
1085 value &= (1 << 31);
1086 env->cp15.c9_pminten &= ~value;
1087 }
1088
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Write handler shared by the VBAR family of registers.
     *
     * Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
1100
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* No EL2: SCR_EL3.HCE (hyp call enable) is RES0 */
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
1128
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank: CSSELR is banked between Secure and Non-secure, and the
     * register definition's .secure field tells us which bank this
     * CCSIDR access belongs to.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
1141
1142 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1143 uint64_t value)
1144 {
1145 raw_write(env, ri, value & 0xf);
1146 }
1147
1148 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1149 {
1150 CPUState *cs = ENV_GET_CPU(env);
1151 uint64_t ret = 0;
1152
1153 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1154 ret |= CPSR_I;
1155 }
1156 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1157 ret |= CPSR_F;
1158 }
1159 /* External aborts are not possible in QEMU so A bit is always clear */
1160 return ret;
1161 }
1162
/* cp15 registers common to all v7 CPUs: PMU, cache-ID, fault status,
 * memory attribute and TLB-maintenance registers.
 */
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    /* PMSELR and PMCCNTR need the QEMU timer infrastructure, which is
     * only available in system emulation.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
1367
/* Registers present only on v7 multiprocessor (MP-extensions) cores */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1382
1383 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1384 uint64_t value)
1385 {
1386 value &= 1;
1387 env->teecr = value;
1388 }
1389
1390 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1391 bool isread)
1392 {
1393 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1394 return CP_ACCESS_TRAP;
1395 }
1396 return CP_ACCESS_OK;
1397 }
1398
/* ThumbEE registers (cp14): configuration and handler base */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1409
/* v6K thread/process ID registers (TPIDR*), plus their AArch64 views */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1441
1442 #ifndef CONFIG_USER_ONLY
1443
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        /* CNTKCTL[1:0] == 0 means neither PL0PCTEN nor PL0VCTEN is set */
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        /* No EL-specific restrictions; the write check below applies */
        break;
    }

    /* Writes are only allowed at the highest implemented EL */
    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
1476
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* With EL2 present, Non-secure EL0/EL1 physical counter accesses
     * trap to EL2 when CNTHCTL bit 0 is clear.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
1496
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.  (The EL0PTEN/EL0VTEN bits sit at CNTKCTL
     * positions 9 and 8, hence the "9 - timeridx" index.)
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* With EL2 present, Non-secure EL0/EL1 physical timer accesses
     * trap to EL2 when CNTHCTL bit 1 is clear.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
1518
/* Per-register access wrappers: bind the generic counter/timer access
 * checks above to the relevant timer index for each register family.
 */
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* CNTPCT: physical count register */
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* CNTVCT: virtual count register */
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTP_* physical timer registers */
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTV_* virtual timer registers */
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
1544
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            /* Non-secure EL1 never sees the secure timer */
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            /* SCR_EL3.ST gates Secure EL1 access */
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
1571
/* Current generic-timer system count: virtual-clock ns scaled to ticks */
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
1576
/* Recompute the state of one generic timer: update ISTATUS, drive the
 * interrupt output line, and (re)arm or cancel the backing QEMUTimer.
 * Called whenever anything that feeds into the timer state changes
 * (CTL/CVAL writes, CNTVOFF changes, timer expiry).
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        /* ISTATUS is CTL bit 2 */
        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* Interrupt fires when the condition is met and IMASK (bit 1)
         * is clear.
         */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
1623
/* Register reset hook: cancel any pending expiry for this timer */
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

/* CNTPCT: the physical count is the raw system count */
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

/* CNTVCT: virtual count is the physical count minus CNTVOFF_EL2 */
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
1641
/* CNTx_CVAL write: store the absolute compare value and recompute
 * timer state.
 */
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

/* CNTx_TVAL read: 32-bit downcounting view, i.e. cval minus the
 * current (offset-adjusted) count.
 */
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    /* Only the virtual timer applies the CNTVOFF offset */
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

/* CNTx_TVAL write: convert the signed 32-bit downcount back into an
 * absolute compare value and recompute timer state.
 */
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    /* TVAL is sign-extended: a negative downcount means cval in the past */
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1671
/* CNTx_CTL write: only ENABLE (bit 0) and IMASK (bit 1) are writable;
 * ISTATUS (bit 2) is preserved and recomputed as needed.
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
1694
/* Physical (EL1) timer register hooks: thin wrappers binding the
 * shared handlers above to GTIMER_PHYS.
 */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

/* Virtual timer register hooks: same wrappers for GTIMER_VIRT */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
1750
/* CNTVOFF_EL2 write: the virtual timer's notion of "now" shifts, so
 * its state must be recomputed.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

/* Hypervisor (EL2) timer register hooks: wrappers for GTIMER_HYP */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
1788
/* Secure physical timer register hooks: wrappers for GTIMER_SEC */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1816
/* QEMUTimer expiry callbacks: when a backing timer fires, recompute
 * the corresponding generic timer's state (ISTATUS, irq line, next
 * expiry).
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
1844
/* Register descriptions for the ARM generic timers: CNTFRQ, CNTKCTL,
 * the per-timer CTL/TVAL/CVAL registers and the CNTPCT/CNTVCT counters.
 * Both AArch32 (cp15) and AArch64 encodings of the same underlying state
 * are listed, plus the secure-banked versions of the physical timer.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    /* Secure-banked AArch32 view of the physical timer control */
    { .name = "CNTP_CTL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
2029
2030 #else
2031 /* In user-mode none of the generic timer registers are accessible,
2032 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2033 * so instead just don't register any of them.
2034 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Intentionally empty: no generic timer registers in user-mode. */
    REGINFO_SENTINEL
};
2038
2039 #endif
2040
2041 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2042 {
2043 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2044 raw_write(env, ri, value);
2045 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2046 raw_write(env, ri, value & 0xfffff6ff);
2047 } else {
2048 raw_write(env, ri, value & 0xfffff1ff);
2049 }
2050 }
2051
2052 #ifndef CONFIG_USER_ONLY
2053 /* get_phys_addr() isn't present for user-mode-only targets */
2054
2055 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2056 bool isread)
2057 {
2058 if (ri->opc2 & 4) {
2059 /* The ATS12NSO* operations must trap to EL3 if executed in
2060 * Secure EL1 (which can only happen if EL3 is AArch64).
2061 * They are simply UNDEF if executed from NS EL1.
2062 * They function normally from EL2 or EL3.
2063 */
2064 if (arm_current_el(env) == 1) {
2065 if (arm_is_secure_below_el3(env)) {
2066 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2067 }
2068 return CP_ACCESS_TRAP_UNCATEGORIZED;
2069 }
2070 }
2071 return CP_ACCESS_OK;
2072 }
2073
/* Perform an address translation of 'value' for the given access type in
 * the given translation regime, and return the result in the format the
 * PAR register expects: 64-bit long-descriptor format if extended
 * addresses are enabled, 32-bit short-descriptor format otherwise.
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             int access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    uint32_t fsr;
    bool ret;       /* true if the translation faulted */
    uint64_t par64;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx,
                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
    if (extended_addresses_enabled(env)) {
        /* fsr is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            /* Successful translation: physical address plus NS attribute */
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersections report only address bits [31:24] (v7) */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            /* Fault: repack the FSR bits into the PAR fault-status layout */
            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
2131
/* Write handler for the AArch32 ATS* address-translation operations:
 * select the translation regime implied by the opcode and the current
 * EL/security state, perform the translation, and store the result in
 * the banked PAR register.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    int access_type = ri->opc2 & 1;   /* opc2 bit 0: 0 = read op, 1 = write op */
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
2189
2190 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2191 uint64_t value)
2192 {
2193 int access_type = ri->opc2 & 1;
2194 uint64_t par64;
2195
2196 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2197
2198 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2199 }
2200
2201 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2202 bool isread)
2203 {
2204 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2205 return CP_ACCESS_TRAP;
2206 }
2207 return CP_ACCESS_OK;
2208 }
2209
/* Write handler for the AArch64 AT* address-translation operations:
 * pick the translation regime from the opcode (and the current security
 * state), perform the translation and store the result in PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    int access_type = ri->opc2 & 1;   /* opc2 bit 0: 0 = read op, 1 = write op */
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* opc1 distinguishes the target EL for the stage 1 ops */
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
2248 #endif
2249
/* VA-to-PA registers: the banked PAR and (system emulation only) the
 * ATS address-translation operations that write it.
 */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
2264
/* Return basic MPU access permission bits: compress each region's 4-bit
 * extended-format AP field down to the 2-bit basic-format field.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        /* Low two bits of each 4-bit field, packed into 2 bits each */
        ret |= ((val >> (region * 4)) & 3) << (region * 2);
    }
    return ret;
}
2279
/* Pad basic MPU access permission bits to extended format: place each
 * region's 2-bit basic AP field into the low bits of a 4-bit field.
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 2)) & 3) << (region * 4);
    }
    return ret;
}
2294
/* Write the basic-format data AP register: the value is expanded to the
 * extended format, which is what we store internally.
 */
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}
2300
/* Read the basic-format view of the (extended-format) data AP register. */
static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}
2305
/* Write the basic-format instruction AP register: the value is expanded
 * to the extended format, which is what we store internally.
 */
static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}
2311
/* Read the basic-format view of the (extended-format) instruction AP
 * register.
 */
static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
2316
2317 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2318 {
2319 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2320
2321 if (!u32p) {
2322 return 0;
2323 }
2324
2325 u32p += env->cp15.c6_rgnr;
2326 return *u32p;
2327 }
2328
2329 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2330 uint64_t value)
2331 {
2332 ARMCPU *cpu = arm_env_get_cpu(env);
2333 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2334
2335 if (!u32p) {
2336 return;
2337 }
2338
2339 u32p += env->cp15.c6_rgnr;
2340 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2341 *u32p = value;
2342 }
2343
2344 static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2345 {
2346 ARMCPU *cpu = arm_env_get_cpu(env);
2347 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2348
2349 if (!u32p) {
2350 return;
2351 }
2352
2353 memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2354 }
2355
2356 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2357 uint64_t value)
2358 {
2359 ARMCPU *cpu = arm_env_get_cpu(env);
2360 uint32_t nrgs = cpu->pmsav7_dregion;
2361
2362 if (value >= nrgs) {
2363 qemu_log_mask(LOG_GUEST_ERROR,
2364 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2365 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2366 return;
2367 }
2368
2369 raw_write(env, ri, value);
2370 }
2371
/* PMSAv7 MPU registers: the per-region base/size/access-control registers
 * (indexed indirectly via RGNR) and RGNR itself.
 */
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
      .writefn = pmsav7_rgnr_write },
    REGINFO_SENTINEL
};
2391
/* PMSAv5 MPU registers: access-permission registers (basic and extended
 * views of the same state), cache configuration, and the eight
 * protection-region base-and-size registers.
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
2442
/* Raw write of TTBCR: mask the value down to the bits valid for this
 * architecture version, store it, and recompute the cached translation
 * table base-address masks derived from TTBCR.N.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);   /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            /* Without EL3, only the N field is writable */
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
2475
2476 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2477 uint64_t value)
2478 {
2479 ARMCPU *cpu = arm_env_get_cpu(env);
2480
2481 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2482 /* With LPAE the TTBCR could result in a change of ASID
2483 * via the TTBCR.A1 bit, so do a TLB flush.
2484 */
2485 tlb_flush(CPU(cpu));
2486 }
2487 vmsa_ttbcr_raw_write(env, ri, value);
2488 }
2489
/* Reset handler for TTBCR/TCR. */
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;   /* matches N == 0 in vmsa_ttbcr_raw_write */
}
2501
/* Write handler for the AArch64 TCR_EL1. */
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
2512
2513 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2514 uint64_t value)
2515 {
2516 /* 64 bit accesses to the TTBRs can change the ASID and so we
2517 * must flush the TLB.
2518 */
2519 if (cpreg_field_is_64bit(ri)) {
2520 ARMCPU *cpu = arm_env_get_cpu(env);
2521
2522 tlb_flush(CPU(cpu));
2523 }
2524 raw_write(env, ri, value);
2525 }
2526
2527 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2528 uint64_t value)
2529 {
2530 ARMCPU *cpu = arm_env_get_cpu(env);
2531 CPUState *cs = CPU(cpu);
2532
2533 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
2534 if (raw_read(env, ri) != value) {
2535 tlb_flush_by_mmuidx(cs,
2536 (1 << ARMMMUIdx_S12NSE1) |
2537 (1 << ARMMMUIdx_S12NSE0) |
2538 (1 << ARMMMUIdx_S2NS));
2539 raw_write(env, ri, value);
2540 }
2541 }
2542
/* Fault status/address registers shared between the VMSA and PMSA
 * configurations: DFSR, IFSR, DFAR and the AArch64 FAR_EL1.
 */
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
2562
/* VMSA (MMU-based) registers: ESR_EL1, the translation table base
 * registers TTBR0/TTBR1, and the translation control register
 * (TCR_EL1 for AArch64, TTBCR as the AArch32 alias view).
 */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
2590
/* Write the OMAP/TI925T TICONFIG register (only bits in 0xe7 are kept). */
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}
2599
/* Write the OMAP thread ID register; only the low 16 bits are stored. */
static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}
2605
/* Halt the CPU as a side-effect of writing the OMAP WFI register. */
static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}
2612
/* OMAP cache maintenance operations: we have no cache, so the only
 * effect is to reset the dirty-line index registers.
 */
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
2622
/* OMAP/TI925T implementation-defined registers (crn = 15 etc). */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2662
/* Write the XScale Coprocessor Access Register; only bits [13:0] are
 * kept.
 */
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}
2668
/* XScale implementation-defined registers: CPAR, the auxiliary control
 * register, and the cache-lockdown operations (NOPs for us).
 */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2695
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2709
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2717
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2738
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    /* Test and clean DCache */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    /* Test, clean and invalidate DCache */
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2751
2752 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2753 /* Ignore ReadBuffer accesses */
2754 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2755 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2756 .access = PL1_RW, .resetvalue = 0,
2757 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2758 REGINFO_SENTINEL
2759 };
2760
2761 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2762 {
2763 ARMCPU *cpu = arm_env_get_cpu(env);
2764 unsigned int cur_el = arm_current_el(env);
2765 bool secure = arm_is_secure(env);
2766
2767 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2768 return env->cp15.vpidr_el2;
2769 }
2770 return raw_read(env, ri);
2771 }
2772
2773 static uint64_t mpidr_read_val(CPUARMState *env)
2774 {
2775 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2776 uint64_t mpidr = cpu->mp_affinity;
2777
2778 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2779 mpidr |= (1U << 31);
2780 /* Cores which are uniprocessor (non-coherent)
2781 * but still implement the MP extensions set
2782 * bit 30. (For instance, Cortex-R5).
2783 */
2784 if (cpu->mp_is_up) {
2785 mpidr |= (1u << 30);
2786 }
2787 }
2788 return mpidr;
2789 }
2790
2791 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2792 {
2793 unsigned int cur_el = arm_current_el(env);
2794 bool secure = arm_is_secure(env);
2795
2796 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2797 return env->cp15.vmpidr_el2;
2798 }
2799 return mpidr_read_val(env);
2800 }
2801
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    /* MPIDR (AArch32) / MPIDR_EL1 (AArch64): read-only; the value is
     * synthesized by mpidr_read() rather than stored, hence
     * ARM_CP_NO_RAW.
     */
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2808
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: constant zero, since QEMU attaches no meaning to
     * the auxiliary memory attributes.
     */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* 64-bit PAR; banked between secure and non-secure state
     * (bank_fieldoffsets[0] = secure, [1] = non-secure).
     */
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    /* 64-bit TTBR0/TTBR1: ARM_CP_ALIAS because they view the same
     * state as the 32-bit forms; writes go through vmsa_ttbr_write
     * so TLB maintenance happens.
     */
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
2835
/* Read FPCR: forwarded to the shared VFP control-register state. */
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}
2840
/* Write FPCR: forwarded to the shared VFP control-register state. */
static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}
2846
/* Read FPSR: forwarded to the shared VFP status-register state. */
static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}
2851
/* Write FPSR: forwarded to the shared VFP status-register state. */
static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
2857
2858 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2859 bool isread)
2860 {
2861 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2862 return CP_ACCESS_TRAP;
2863 }
2864 return CP_ACCESS_OK;
2865 }
2866
/* Write PSTATE.DAIF: only the four exception-mask bits are writable;
 * all other bits of the value are ignored.
 */
static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
2872
2873 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2874 const ARMCPRegInfo *ri,
2875 bool isread)
2876 {
2877 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2878 * SCTLR_EL1.UCI is set.
2879 */
2880 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2881 return CP_ACCESS_TRAP;
2882 }
2883 return CP_ACCESS_OK;
2884 }
2885
2886 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2887 * Page D4-1736 (DDI0487A.b)
2888 */
2889
2890 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2891 uint64_t value)
2892 {
2893 CPUState *cs = ENV_GET_CPU(env);
2894
2895 if (arm_is_secure_below_el3(env)) {
2896 tlb_flush_by_mmuidx(cs,
2897 (1 << ARMMMUIdx_S1SE1) |
2898 (1 << ARMMMUIdx_S1SE0));
2899 } else {
2900 tlb_flush_by_mmuidx(cs,
2901 (1 << ARMMMUIdx_S12NSE1) |
2902 (1 << ARMMMUIdx_S12NSE0));
2903 }
2904 }
2905
2906 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2907 uint64_t value)
2908 {
2909 CPUState *cs = ENV_GET_CPU(env);
2910 bool sec = arm_is_secure_below_el3(env);
2911
2912 if (sec) {
2913 tlb_flush_by_mmuidx_all_cpus_synced(cs,
2914 (1 << ARMMMUIdx_S1SE1) |
2915 (1 << ARMMMUIdx_S1SE0));
2916 } else {
2917 tlb_flush_by_mmuidx_all_cpus_synced(cs,
2918 (1 << ARMMMUIdx_S12NSE1) |
2919 (1 << ARMMMUIdx_S12NSE0));
2920 }
2921 }
2922
2923 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2924 uint64_t value)
2925 {
2926 /* Note that the 'ALL' scope must invalidate both stage 1 and
2927 * stage 2 translations, whereas most other scopes only invalidate
2928 * stage 1 translations.
2929 */
2930 ARMCPU *cpu = arm_env_get_cpu(env);
2931 CPUState *cs = CPU(cpu);
2932
2933 if (arm_is_secure_below_el3(env)) {
2934 tlb_flush_by_mmuidx(cs,
2935 (1 << ARMMMUIdx_S1SE1) |
2936 (1 << ARMMMUIdx_S1SE0));
2937 } else {
2938 if (arm_feature(env, ARM_FEATURE_EL2)) {
2939 tlb_flush_by_mmuidx(cs,
2940 (1 << ARMMMUIdx_S12NSE1) |
2941 (1 << ARMMMUIdx_S12NSE0) |
2942 (1 << ARMMMUIdx_S2NS));
2943 } else {
2944 tlb_flush_by_mmuidx(cs,
2945 (1 << ARMMMUIdx_S12NSE1) |
2946 (1 << ARMMMUIdx_S12NSE0));
2947 }
2948 }
2949 }
2950
2951 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2952 uint64_t value)
2953 {
2954 ARMCPU *cpu = arm_env_get_cpu(env);
2955 CPUState *cs = CPU(cpu);
2956
2957 tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
2958 }
2959
2960 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2961 uint64_t value)
2962 {
2963 ARMCPU *cpu = arm_env_get_cpu(env);
2964 CPUState *cs = CPU(cpu);
2965
2966 tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3));
2967 }
2968
2969 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2970 uint64_t value)
2971 {
2972 /* Note that the 'ALL' scope must invalidate both stage 1 and
2973 * stage 2 translations, whereas most other scopes only invalidate
2974 * stage 1 translations.
2975 */
2976 CPUState *cs = ENV_GET_CPU(env);
2977 bool sec = arm_is_secure_below_el3(env);
2978 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2979
2980 if (sec) {