target/arm: Fix SVE system register access checks
[qemu.git] / target / arm / helper.c
1 #include "qemu/osdep.h"
2 #include "target/arm/idau.h"
3 #include "trace.h"
4 #include "cpu.h"
5 #include "internals.h"
6 #include "exec/gdbstub.h"
7 #include "exec/helper-proto.h"
8 #include "qemu/host-utils.h"
9 #include "sysemu/arch_init.h"
10 #include "sysemu/sysemu.h"
11 #include "qemu/bitops.h"
12 #include "qemu/crc32c.h"
13 #include "exec/exec-all.h"
14 #include "exec/cpu_ldst.h"
15 #include "arm_ldst.h"
16 #include <zlib.h> /* For crc32 */
17 #include "exec/semihost.h"
18 #include "sysemu/kvm.h"
19 #include "fpu/softfloat.h"
20
21 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
22
23 #ifndef CONFIG_USER_ONLY
24 /* Cacheability and shareability attributes for a memory access */
25 typedef struct ARMCacheAttrs {
26 unsigned int attrs:8; /* as in the MAIR register encoding */
27 unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
28 } ARMCacheAttrs;
29
30 static bool get_phys_addr(CPUARMState *env, target_ulong address,
31 MMUAccessType access_type, ARMMMUIdx mmu_idx,
32 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
33 target_ulong *page_size,
34 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
35
36 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
37 MMUAccessType access_type, ARMMMUIdx mmu_idx,
38 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
39 target_ulong *page_size_ptr,
40 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
41
42 /* Security attributes for an address, as returned by v8m_security_lookup. */
43 typedef struct V8M_SAttributes {
44 bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
45 bool ns;
46 bool nsc;
47 uint8_t sregion;
48 bool srvalid;
49 uint8_t iregion;
50 bool irvalid;
51 } V8M_SAttributes;
52
53 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
54 MMUAccessType access_type, ARMMMUIdx mmu_idx,
55 V8M_SAttributes *sattrs);
56 #endif
57
58 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
59 {
60 int nregs;
61
62 /* VFP data registers are always little-endian. */
63 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
64 if (reg < nregs) {
65 stq_le_p(buf, *aa32_vfp_dreg(env, reg));
66 return 8;
67 }
68 if (arm_feature(env, ARM_FEATURE_NEON)) {
69 /* Aliases for Q regs. */
70 nregs += 16;
71 if (reg < nregs) {
72 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
73 stq_le_p(buf, q[0]);
74 stq_le_p(buf + 8, q[1]);
75 return 16;
76 }
77 }
78 switch (reg - nregs) {
79 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
80 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
81 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
82 }
83 return 0;
84 }
85
86 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
87 {
88 int nregs;
89
90 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
91 if (reg < nregs) {
92 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
93 return 8;
94 }
95 if (arm_feature(env, ARM_FEATURE_NEON)) {
96 nregs += 16;
97 if (reg < nregs) {
98 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
99 q[0] = ldq_le_p(buf);
100 q[1] = ldq_le_p(buf + 8);
101 return 16;
102 }
103 }
104 switch (reg - nregs) {
105 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
106 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
107 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
108 }
109 return 0;
110 }
111
112 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
113 {
114 switch (reg) {
115 case 0 ... 31:
116 /* 128 bit FP register */
117 {
118 uint64_t *q = aa64_vfp_qreg(env, reg);
119 stq_le_p(buf, q[0]);
120 stq_le_p(buf + 8, q[1]);
121 return 16;
122 }
123 case 32:
124 /* FPSR */
125 stl_p(buf, vfp_get_fpsr(env));
126 return 4;
127 case 33:
128 /* FPCR */
129 stl_p(buf, vfp_get_fpcr(env));
130 return 4;
131 default:
132 return 0;
133 }
134 }
135
136 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
137 {
138 switch (reg) {
139 case 0 ... 31:
140 /* 128 bit FP register */
141 {
142 uint64_t *q = aa64_vfp_qreg(env, reg);
143 q[0] = ldq_le_p(buf);
144 q[1] = ldq_le_p(buf + 8);
145 return 16;
146 }
147 case 32:
148 /* FPSR */
149 vfp_set_fpsr(env, ldl_p(buf));
150 return 4;
151 case 33:
152 /* FPCR */
153 vfp_set_fpcr(env, ldl_p(buf));
154 return 4;
155 default:
156 return 0;
157 }
158 }
159
160 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
161 {
162 assert(ri->fieldoffset);
163 if (cpreg_field_is_64bit(ri)) {
164 return CPREG_FIELD64(env, ri);
165 } else {
166 return CPREG_FIELD32(env, ri);
167 }
168 }
169
170 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
171 uint64_t value)
172 {
173 assert(ri->fieldoffset);
174 if (cpreg_field_is_64bit(ri)) {
175 CPREG_FIELD64(env, ri) = value;
176 } else {
177 CPREG_FIELD32(env, ri) = value;
178 }
179 }
180
/* Return a host pointer to the backing storage for this register's
 * state field inside the CPU state structure.
 */
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
185
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc).
     * Priority order: constant value, then the dedicated raw accessor,
     * then the normal accessor, and finally a direct field load.
     */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
199
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     * Priority order mirrors read_raw_cp_reg(): raw accessor first,
     * then the normal accessor, then a direct field store.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
218
219 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
220 {
221 ARMCPU *cpu = arm_env_get_cpu(env);
222 const ARMCPRegInfo *ri;
223 uint32_t key;
224
225 key = cpu->dyn_xml.cpregs_keys[reg];
226 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
227 if (ri) {
228 if (cpreg_field_is_64bit(ri)) {
229 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
230 } else {
231 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
232 }
233 }
234 return 0;
235 }
236
/* System register writes from the gdbstub are not supported; always
 * report zero bytes consumed so gdb treats these registers as
 * read-only.
 */
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
241
242 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
243 {
244 /* Return true if the regdef would cause an assertion if you called
245 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
246 * program bug for it not to have the NO_RAW flag).
247 * NB that returning false here doesn't necessarily mean that calling
248 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
249 * read/write access functions which are safe for raw use" from "has
250 * read/write access functions which have side effects but has forgotten
251 * to provide raw access functions".
252 * The tests here line up with the conditions in read/write_raw_cp_reg()
253 * and assertions in raw_read()/raw_write().
254 */
255 if ((ri->type & ARM_CP_CONST) ||
256 ri->fieldoffset ||
257 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
258 return false;
259 }
260 return true;
261 }
262
263 bool write_cpustate_to_list(ARMCPU *cpu)
264 {
265 /* Write the coprocessor state from cpu->env to the (index,value) list. */
266 int i;
267 bool ok = true;
268
269 for (i = 0; i < cpu->cpreg_array_len; i++) {
270 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
271 const ARMCPRegInfo *ri;
272
273 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
274 if (!ri) {
275 ok = false;
276 continue;
277 }
278 if (ri->type & ARM_CP_NO_RAW) {
279 continue;
280 }
281 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
282 }
283 return ok;
284 }
285
286 bool write_list_to_cpustate(ARMCPU *cpu)
287 {
288 int i;
289 bool ok = true;
290
291 for (i = 0; i < cpu->cpreg_array_len; i++) {
292 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
293 uint64_t v = cpu->cpreg_values[i];
294 const ARMCPRegInfo *ri;
295
296 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
297 if (!ri) {
298 ok = false;
299 continue;
300 }
301 if (ri->type & ARM_CP_NO_RAW) {
302 continue;
303 }
304 /* Write value and confirm it reads back as written
305 * (to catch read-only registers and partially read-only
306 * registers where the incoming migration value doesn't match)
307 */
308 write_raw_cp_reg(&cpu->env, ri, v);
309 if (read_raw_cp_reg(&cpu->env, ri) != v) {
310 ok = false;
311 }
312 }
313 return ok;
314 }
315
316 static void add_cpreg_to_list(gpointer key, gpointer opaque)
317 {
318 ARMCPU *cpu = opaque;
319 uint64_t regidx;
320 const ARMCPRegInfo *ri;
321
322 regidx = *(uint32_t *)key;
323 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
324
325 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
326 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
327 /* The value array need not be initialized at this point */
328 cpu->cpreg_array_len++;
329 }
330 }
331
332 static void count_cpreg(gpointer key, gpointer opaque)
333 {
334 ARMCPU *cpu = opaque;
335 uint64_t regidx;
336 const ARMCPRegInfo *ri;
337
338 regidx = *(uint32_t *)key;
339 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
340
341 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
342 cpu->cpreg_array_len++;
343 }
344 }
345
346 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
347 {
348 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
349 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
350
351 if (aidx > bidx) {
352 return 1;
353 }
354 if (aidx < bidx) {
355 return -1;
356 }
357 return 0;
358 }
359
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    /* First pass: count the raw-accessible registers so the arrays can
     * be sized exactly.
     */
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    /* Second pass: add_cpreg_to_list() re-increments cpreg_array_len as
     * it fills the index array, so reset it before the walk.
     */
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    /* Both passes use the same filter, so the counts must agree. */
    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
389
390 /*
391 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
392 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
393 *
394 * access_el3_aa32ns: Used to check AArch32 register views.
395 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
396 */
397 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
398 const ARMCPRegInfo *ri,
399 bool isread)
400 {
401 bool secure = arm_is_secure_below_el3(env);
402
403 assert(!arm_el_is_aa64(env, 3));
404 if (secure) {
405 return CP_ACCESS_TRAP_UNCATEGORIZED;
406 }
407 return CP_ACCESS_OK;
408 }
409
410 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
411 const ARMCPRegInfo *ri,
412 bool isread)
413 {
414 if (!arm_el_is_aa64(env, 3)) {
415 return access_el3_aa32ns(env, ri, isread);
416 }
417 return CP_ACCESS_OK;
418 }
419
420 /* Some secure-only AArch32 registers trap to EL3 if used from
421 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
422 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
423 * We assume that the .access field is set to PL1_RW.
424 */
425 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
426 const ARMCPRegInfo *ri,
427 bool isread)
428 {
429 if (arm_current_el(env) == 3) {
430 return CP_ACCESS_OK;
431 }
432 if (arm_is_secure_below_el3(env)) {
433 return CP_ACCESS_TRAP_EL3;
434 }
435 /* This will be EL1 NS and EL2 NS, which just UNDEF */
436 return CP_ACCESS_TRAP_UNCATEGORIZED;
437 }
438
439 /* Check for traps to "powerdown debug" registers, which are controlled
440 * by MDCR.TDOSA
441 */
442 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
443 bool isread)
444 {
445 int el = arm_current_el(env);
446
447 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
448 && !arm_is_secure_below_el3(env)) {
449 return CP_ACCESS_TRAP_EL2;
450 }
451 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
452 return CP_ACCESS_TRAP_EL3;
453 }
454 return CP_ACCESS_OK;
455 }
456
457 /* Check for traps to "debug ROM" registers, which are controlled
458 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
459 */
460 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
461 bool isread)
462 {
463 int el = arm_current_el(env);
464
465 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
466 && !arm_is_secure_below_el3(env)) {
467 return CP_ACCESS_TRAP_EL2;
468 }
469 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
470 return CP_ACCESS_TRAP_EL3;
471 }
472 return CP_ACCESS_OK;
473 }
474
475 /* Check for traps to general debug registers, which are controlled
476 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
477 */
478 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
479 bool isread)
480 {
481 int el = arm_current_el(env);
482
483 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
484 && !arm_is_secure_below_el3(env)) {
485 return CP_ACCESS_TRAP_EL2;
486 }
487 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
488 return CP_ACCESS_TRAP_EL3;
489 }
490 return CP_ACCESS_OK;
491 }
492
493 /* Check for traps to performance monitor registers, which are controlled
494 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
495 */
496 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
497 bool isread)
498 {
499 int el = arm_current_el(env);
500
501 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
502 && !arm_is_secure_below_el3(env)) {
503 return CP_ACCESS_TRAP_EL2;
504 }
505 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
506 return CP_ACCESS_TRAP_EL3;
507 }
508 return CP_ACCESS_OK;
509 }
510
511 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
512 {
513 ARMCPU *cpu = arm_env_get_cpu(env);
514
515 raw_write(env, ri, value);
516 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
517 }
518
519 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
520 {
521 ARMCPU *cpu = arm_env_get_cpu(env);
522
523 if (raw_read(env, ri) != value) {
524 /* Unlike real hardware the qemu TLB uses virtual addresses,
525 * not modified virtual addresses, so this causes a TLB flush.
526 */
527 tlb_flush(CPU(cpu));
528 raw_write(env, ri, value);
529 }
530 }
531
532 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
533 uint64_t value)
534 {
535 ARMCPU *cpu = arm_env_get_cpu(env);
536
537 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
538 && !extended_addresses_enabled(env)) {
539 /* For VMSA (when not using the LPAE long descriptor page table
540 * format) this register includes the ASID, so do a TLB flush.
541 * For PMSA it is purely a process ID and no action is needed.
542 */
543 tlb_flush(CPU(cpu));
544 }
545 raw_write(env, ri, value);
546 }
547
548 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
549 uint64_t value)
550 {
551 /* Invalidate all (TLBIALL) */
552 ARMCPU *cpu = arm_env_get_cpu(env);
553
554 tlb_flush(CPU(cpu));
555 }
556
557 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
558 uint64_t value)
559 {
560 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
561 ARMCPU *cpu = arm_env_get_cpu(env);
562
563 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
564 }
565
566 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
567 uint64_t value)
568 {
569 /* Invalidate by ASID (TLBIASID) */
570 ARMCPU *cpu = arm_env_get_cpu(env);
571
572 tlb_flush(CPU(cpu));
573 }
574
575 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
576 uint64_t value)
577 {
578 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
579 ARMCPU *cpu = arm_env_get_cpu(env);
580
581 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
582 }
583
584 /* IS variants of TLB operations must affect all cores */
585 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
586 uint64_t value)
587 {
588 CPUState *cs = ENV_GET_CPU(env);
589
590 tlb_flush_all_cpus_synced(cs);
591 }
592
593 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
594 uint64_t value)
595 {
596 CPUState *cs = ENV_GET_CPU(env);
597
598 tlb_flush_all_cpus_synced(cs);
599 }
600
601 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
602 uint64_t value)
603 {
604 CPUState *cs = ENV_GET_CPU(env);
605
606 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
607 }
608
609 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
610 uint64_t value)
611 {
612 CPUState *cs = ENV_GET_CPU(env);
613
614 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
615 }
616
617 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
618 uint64_t value)
619 {
620 CPUState *cs = ENV_GET_CPU(env);
621
622 tlb_flush_by_mmuidx(cs,
623 ARMMMUIdxBit_S12NSE1 |
624 ARMMMUIdxBit_S12NSE0 |
625 ARMMMUIdxBit_S2NS);
626 }
627
628 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
629 uint64_t value)
630 {
631 CPUState *cs = ENV_GET_CPU(env);
632
633 tlb_flush_by_mmuidx_all_cpus_synced(cs,
634 ARMMMUIdxBit_S12NSE1 |
635 ARMMMUIdxBit_S12NSE0 |
636 ARMMMUIdxBit_S2NS);
637 }
638
639 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
640 uint64_t value)
641 {
642 /* Invalidate by IPA. This has to invalidate any structures that
643 * contain only stage 2 translation information, but does not need
644 * to apply to structures that contain combined stage 1 and stage 2
645 * translation information.
646 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
647 */
648 CPUState *cs = ENV_GET_CPU(env);
649 uint64_t pageaddr;
650
651 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
652 return;
653 }
654
655 pageaddr = sextract64(value << 12, 0, 40);
656
657 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
658 }
659
660 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
661 uint64_t value)
662 {
663 CPUState *cs = ENV_GET_CPU(env);
664 uint64_t pageaddr;
665
666 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
667 return;
668 }
669
670 pageaddr = sextract64(value << 12, 0, 40);
671
672 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
673 ARMMMUIdxBit_S2NS);
674 }
675
676 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
677 uint64_t value)
678 {
679 CPUState *cs = ENV_GET_CPU(env);
680
681 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
682 }
683
684 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
685 uint64_t value)
686 {
687 CPUState *cs = ENV_GET_CPU(env);
688
689 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
690 }
691
692 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
693 uint64_t value)
694 {
695 CPUState *cs = ENV_GET_CPU(env);
696 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
697
698 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
699 }
700
701 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
702 uint64_t value)
703 {
704 CPUState *cs = ENV_GET_CPU(env);
705 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
706
707 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
708 ARMMMUIdxBit_S1E2);
709 }
710
/* CP registers common to all CPU variants handled by this file. */
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
745
/* CP registers registered only for pre-v8 CPUs. */
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
774
/* CP registers registered only for pre-v6 CPUs. */
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
783
/* CP registers registered only for pre-v7 CPUs. */
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory region attribute registers: NOPed here. */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
832
/* Write CPACR, masking off bits that are RAZ/WI or RAO/WI for the
 * implemented feature set before storing.
 */
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        /* Without VFP the mask stays 0, so the whole write reads as zero. */
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
866
867 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
868 {
869 /* Call cpacr_write() so that we reset with the correct RAO bits set
870 * for our CPU features.
871 */
872 cpacr_write(env, ri, 0);
873 }
874
875 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
876 bool isread)
877 {
878 if (arm_feature(env, ARM_FEATURE_V8)) {
879 /* Check if CPACR accesses are to be trapped to EL2 */
880 if (arm_current_el(env) == 1 &&
881 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
882 return CP_ACCESS_TRAP_EL2;
883 /* Check if CPACR accesses are to be trapped to EL3 */
884 } else if (arm_current_el(env) < 3 &&
885 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
886 return CP_ACCESS_TRAP_EL3;
887 }
888 }
889
890 return CP_ACCESS_OK;
891 }
892
893 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
894 bool isread)
895 {
896 /* Check if CPTR accesses are set to trap to EL3 */
897 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
898 return CP_ACCESS_TRAP_EL3;
899 }
900
901 return CP_ACCESS_OK;
902 }
903
/* CP registers added in ARMv6. */
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Instruction fault address, banked secure/non-secure. */
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
935
936 /* Definitions for the PMU registers */
937 #define PMCRN_MASK 0xf800
938 #define PMCRN_SHIFT 11
939 #define PMCRD 0x8
940 #define PMCRC 0x4
941 #define PMCRE 0x1
942
/* Number of implemented PMU event counters, from the PMCR.N field. */
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}
947
948 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
949 static inline uint64_t pmu_counter_mask(CPUARMState *env)
950 {
951 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
952 }
953
954 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
955 bool isread)
956 {
957 /* Performance monitor registers user accessibility is controlled
958 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
959 * trapping to EL2 or EL3 for other accesses.
960 */
961 int el = arm_current_el(env);
962
963 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
964 return CP_ACCESS_TRAP;
965 }
966 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
967 && !arm_is_secure_below_el3(env)) {
968 return CP_ACCESS_TRAP_EL2;
969 }
970 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
971 return CP_ACCESS_TRAP_EL3;
972 }
973
974 return CP_ACCESS_OK;
975 }
976
977 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
978 const ARMCPRegInfo *ri,
979 bool isread)
980 {
981 /* ER: event counter read trap control */
982 if (arm_feature(env, ARM_FEATURE_V8)
983 && arm_current_el(env) == 0
984 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
985 && isread) {
986 return CP_ACCESS_OK;
987 }
988
989 return pmreg_access(env, ri, isread);
990 }
991
992 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
993 const ARMCPRegInfo *ri,
994 bool isread)
995 {
996 /* SW: software increment write trap control */
997 if (arm_feature(env, ARM_FEATURE_V8)
998 && arm_current_el(env) == 0
999 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1000 && !isread) {
1001 return CP_ACCESS_OK;
1002 }
1003
1004 return pmreg_access(env, ri, isread);
1005 }
1006
1007 #ifndef CONFIG_USER_ONLY
1008
1009 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1010 const ARMCPRegInfo *ri,
1011 bool isread)
1012 {
1013 /* ER: event counter read trap control */
1014 if (arm_feature(env, ARM_FEATURE_V8)
1015 && arm_current_el(env) == 0
1016 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1017 return CP_ACCESS_OK;
1018 }
1019
1020 return pmreg_access(env, ri, isread);
1021 }
1022
1023 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1024 const ARMCPRegInfo *ri,
1025 bool isread)
1026 {
1027 /* CR: cycle counter read trap control */
1028 if (arm_feature(env, ARM_FEATURE_V8)
1029 && arm_current_el(env) == 0
1030 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1031 && isread) {
1032 return CP_ACCESS_OK;
1033 }
1034
1035 return pmreg_access(env, ri, isread);
1036 }
1037
1038 static inline bool arm_ccnt_enabled(CPUARMState *env)
1039 {
1040 /* This does not support checking PMCCFILTR_EL0 register */
1041
1042 if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
1043 return false;
1044 }
1045
1046 return true;
1047 }
1048
/*
 * Toggle c15_ccnt between its two representations.  pmccntr_read()
 * reports (current_ticks - c15_ccnt), so while the counter runs
 * c15_ccnt is a tick-count reference; storing (ticks - c15_ccnt) here
 * converts it to the current counter value, and applying the same
 * transform again (at the same tick count) converts it back.  Callers
 * therefore bracket any change that affects counting with a
 * pmccntr_sync() pair to keep the visible PMCCNTR value consistent.
 */
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    /* Scale ns on QEMU_CLOCK_VIRTUAL to CPU cycles at ARM_CPU_FREQ. */
    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
1065
1066 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1067 uint64_t value)
1068 {
1069 pmccntr_sync(env);
1070
1071 if (value & PMCRC) {
1072 /* The counter has been reset */
1073 env->cp15.c15_ccnt = 0;
1074 }
1075
1076 /* only the DP, X, D and E bits are writable */
1077 env->cp15.c9_pmcr &= ~0x39;
1078 env->cp15.c9_pmcr |= (value & 0x39);
1079
1080 pmccntr_sync(env);
1081 }
1082
/*
 * Read PMCCNTR.  While the counter is disabled, c15_ccnt holds the
 * value directly; while enabled, c15_ccnt is a tick-count reference
 * and the visible value is (current scaled ticks - c15_ccnt).
 */
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    /* Scale ns on QEMU_CLOCK_VIRTUAL to CPU cycles at ARM_CPU_FREQ. */
    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
1101
1102 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1103 uint64_t value)
1104 {
1105 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1106 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1107 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1108 * accessed.
1109 */
1110 env->cp15.c9_pmselr = value & 0x1f;
1111 }
1112
/*
 * Write PMCCNTR.  While disabled, store the value directly; while
 * enabled, store (current scaled ticks - value) so that a subsequent
 * pmccntr_read() yields the written value plus elapsed cycles.
 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    /* Scale ns on QEMU_CLOCK_VIRTUAL to CPU cycles at ARM_CPU_FREQ. */
    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
1133
1134 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1135 uint64_t value)
1136 {
1137 uint64_t cur_val = pmccntr_read(env, NULL);
1138
1139 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1140 }
1141
1142 #else /* CONFIG_USER_ONLY */
1143
/* User-mode emulation stub: no QEMU timers, so nothing to sync. */
void pmccntr_sync(CPUARMState *env)
{
}
1147
1148 #endif
1149
1150 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1151 uint64_t value)
1152 {
1153 pmccntr_sync(env);
1154 env->cp15.pmccfiltr_el0 = value & 0xfc000000;
1155 pmccntr_sync(env);
1156 }
1157
1158 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1159 uint64_t value)
1160 {
1161 value &= pmu_counter_mask(env);
1162 env->cp15.c9_pmcnten |= value;
1163 }
1164
1165 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1166 uint64_t value)
1167 {
1168 value &= pmu_counter_mask(env);
1169 env->cp15.c9_pmcnten &= ~value;
1170 }
1171
1172 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1173 uint64_t value)
1174 {
1175 env->cp15.c9_pmovsr &= ~value;
1176 }
1177
1178 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1179 uint64_t value)
1180 {
1181 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1182 * PMSELR value is equal to or greater than the number of implemented
1183 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1184 */
1185 if (env->cp15.c9_pmselr == 0x1f) {
1186 pmccfiltr_write(env, ri, value);
1187 }
1188 }
1189
1190 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1191 {
1192 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1193 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1194 */
1195 if (env->cp15.c9_pmselr == 0x1f) {
1196 return env->cp15.pmccfiltr_el0;
1197 } else {
1198 return 0;
1199 }
1200 }
1201
1202 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1203 uint64_t value)
1204 {
1205 if (arm_feature(env, ARM_FEATURE_V8)) {
1206 env->cp15.c9_pmuserenr = value & 0xf;
1207 } else {
1208 env->cp15.c9_pmuserenr = value & 1;
1209 }
1210 }
1211
1212 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1213 uint64_t value)
1214 {
1215 /* We have no event counters so only the C bit can be changed */
1216 value &= pmu_counter_mask(env);
1217 env->cp15.c9_pminten |= value;
1218 }
1219
1220 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1221 uint64_t value)
1222 {
1223 value &= pmu_counter_mask(env);
1224 env->cp15.c9_pminten &= ~value;
1225 }
1226
1227 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1228 uint64_t value)
1229 {
1230 /* Note that even though the AArch64 view of this register has bits
1231 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1232 * architectural requirements for bits which are RES0 only in some
1233 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1234 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1235 */
1236 raw_write(env, ri, value & ~0x1FULL);
1237 }
1238
1239 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1240 {
1241 /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1242 * For bits that vary between AArch32/64, code needs to check the
1243 * current execution mode before directly using the feature bit.
1244 */
1245 uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
1246
1247 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1248 valid_mask &= ~SCR_HCE;
1249
1250 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1251 * supported if EL2 exists. The bit is UNK/SBZP when
1252 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1253 * when EL2 is unavailable.
1254 * On ARMv8, this bit is always available.
1255 */
1256 if (arm_feature(env, ARM_FEATURE_V7) &&
1257 !arm_feature(env, ARM_FEATURE_V8)) {
1258 valid_mask &= ~SCR_SMD;
1259 }
1260 }
1261
1262 /* Clear all-context RES0 bits. */
1263 value &= valid_mask;
1264 raw_write(env, ri, value);
1265 }
1266
1267 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1268 {
1269 ARMCPU *cpu = arm_env_get_cpu(env);
1270
1271 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1272 * bank
1273 */
1274 uint32_t index = A32_BANKED_REG_GET(env, csselr,
1275 ri->secure & ARM_CP_SECSTATE_S);
1276
1277 return cpu->ccsidr[index];
1278 }
1279
1280 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1281 uint64_t value)
1282 {
1283 raw_write(env, ri, value & 0xf);
1284 }
1285
1286 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1287 {
1288 CPUState *cs = ENV_GET_CPU(env);
1289 uint64_t ret = 0;
1290
1291 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1292 ret |= CPSR_I;
1293 }
1294 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1295 ret |= CPSR_F;
1296 }
1297 /* External aborts are not possible in QEMU so A bit is always clear */
1298 return ret;
1299 }
1300
/*
 * v7 system registers: the old v6 WFI encoding, the ARM-recommended
 * performance monitor set, cache ID registers (CCSIDR/CSSELR/AIDR),
 * AFSR/MAIR/ISR, and the 32-bit TLB maintenance operations.
 */
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    /* PMSELR and the cycle counter are only modelled for system emulation */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    /* CCSIDR reads index cpu->ccsidr[] by the banked CSSELR value */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
1506
/* v7 Multiprocessing Extensions: Inner Shareable TLB maintenance ops */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1521
1522 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1523 uint64_t value)
1524 {
1525 value &= 1;
1526 env->teecr = value;
1527 }
1528
1529 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1530 bool isread)
1531 {
1532 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1533 return CP_ACCESS_TRAP;
1534 }
1535 return CP_ACCESS_OK;
1536 }
1537
/* ThumbEE registers: TEECR gates PL0 access to TEEHBR (see teehbr_access) */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1548
/*
 * v6K software thread ID registers.  The 32-bit TPIDR* views are banked
 * between Secure and Non-secure state; TPIDRURO/TPIDRRO_EL0 are
 * read-only from PL0 but writable from PL1.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1580
1581 #ifndef CONFIG_USER_ONLY
1582
1583 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1584 bool isread)
1585 {
1586 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1587 * Writable only at the highest implemented exception level.
1588 */
1589 int el = arm_current_el(env);
1590
1591 switch (el) {
1592 case 0:
1593 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1594 return CP_ACCESS_TRAP;
1595 }
1596 break;
1597 case 1:
1598 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1599 arm_is_secure_below_el3(env)) {
1600 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1601 return CP_ACCESS_TRAP_UNCATEGORIZED;
1602 }
1603 break;
1604 case 2:
1605 case 3:
1606 break;
1607 }
1608
1609 if (!isread && el < arm_highest_el(env)) {
1610 return CP_ACCESS_TRAP_UNCATEGORIZED;
1611 }
1612
1613 return CP_ACCESS_OK;
1614 }
1615
1616 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1617 bool isread)
1618 {
1619 unsigned int cur_el = arm_current_el(env);
1620 bool secure = arm_is_secure(env);
1621
1622 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1623 if (cur_el == 0 &&
1624 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1625 return CP_ACCESS_TRAP;
1626 }
1627
1628 if (arm_feature(env, ARM_FEATURE_EL2) &&
1629 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1630 !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1631 return CP_ACCESS_TRAP_EL2;
1632 }
1633 return CP_ACCESS_OK;
1634 }
1635
1636 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1637 bool isread)
1638 {
1639 unsigned int cur_el = arm_current_el(env);
1640 bool secure = arm_is_secure(env);
1641
1642 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1643 * EL0[PV]TEN is zero.
1644 */
1645 if (cur_el == 0 &&
1646 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1647 return CP_ACCESS_TRAP;
1648 }
1649
1650 if (arm_feature(env, ARM_FEATURE_EL2) &&
1651 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1652 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1653 return CP_ACCESS_TRAP_EL2;
1654 }
1655 return CP_ACCESS_OK;
1656 }
1657
1658 static CPAccessResult gt_pct_access(CPUARMState *env,
1659 const ARMCPRegInfo *ri,
1660 bool isread)
1661 {
1662 return gt_counter_access(env, GTIMER_PHYS, isread);
1663 }
1664
1665 static CPAccessResult gt_vct_access(CPUARMState *env,
1666 const ARMCPRegInfo *ri,
1667 bool isread)
1668 {
1669 return gt_counter_access(env, GTIMER_VIRT, isread);
1670 }
1671
1672 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1673 bool isread)
1674 {
1675 return gt_timer_access(env, GTIMER_PHYS, isread);
1676 }
1677
1678 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1679 bool isread)
1680 {
1681 return gt_timer_access(env, GTIMER_VIRT, isread);
1682 }
1683
1684 static CPAccessResult gt_stimer_access(CPUARMState *env,
1685 const ARMCPRegInfo *ri,
1686 bool isread)
1687 {
1688 /* The AArch64 register view of the secure physical timer is
1689 * always accessible from EL3, and configurably accessible from
1690 * Secure EL1.
1691 */
1692 switch (arm_current_el(env)) {
1693 case 1:
1694 if (!arm_is_secure(env)) {
1695 return CP_ACCESS_TRAP;
1696 }
1697 if (!(env->cp15.scr_el3 & SCR_ST)) {
1698 return CP_ACCESS_TRAP_EL3;
1699 }
1700 return CP_ACCESS_OK;
1701 case 0:
1702 case 2:
1703 return CP_ACCESS_TRAP;
1704 case 3:
1705 return CP_ACCESS_OK;
1706 default:
1707 g_assert_not_reached();
1708 }
1709 }
1710
1711 static uint64_t gt_get_countervalue(CPUARMState *env)
1712 {
1713 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1714 }
1715
/*
 * Recompute the state of one generic timer: ISTATUS (CNT*_CTL bit 2),
 * the output IRQ line, and the QEMU timer deadline for the next state
 * change.  Called from the CVAL/TVAL/CTL/CNTVOFF write paths.
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) { /* CNT*_CTL.ENABLE */
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* IRQ fires only when the condition holds and IMASK (bit 1) is 0 */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
1762
1763 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1764 int timeridx)
1765 {
1766 ARMCPU *cpu = arm_env_get_cpu(env);
1767
1768 timer_del(cpu->gt_timer[timeridx]);
1769 }
1770
1771 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1772 {
1773 return gt_get_countervalue(env);
1774 }
1775
1776 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1777 {
1778 return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1779 }
1780
1781 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1782 int timeridx,
1783 uint64_t value)
1784 {
1785 trace_arm_gt_cval_write(timeridx, value);
1786 env->cp15.c14_timer[timeridx].cval = value;
1787 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1788 }
1789
1790 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1791 int timeridx)
1792 {
1793 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1794
1795 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1796 (gt_get_countervalue(env) - offset));
1797 }
1798
1799 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1800 int timeridx,
1801 uint64_t value)
1802 {
1803 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1804
1805 trace_arm_gt_tval_write(timeridx, value);
1806 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1807 sextract64(value, 0, 32);
1808 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1809 }
1810
/*
 * Write CNT*_CTL: only ENABLE (bit 0) and IMASK (bit 1) are stored.
 * Toggling ENABLE needs a full timer recalculation; toggling only
 * IMASK just updates the IRQ line from the previous ISTATUS (bit 2).
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
1833
1834 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1835 {
1836 gt_timer_reset(env, ri, GTIMER_PHYS);
1837 }
1838
1839 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1840 uint64_t value)
1841 {
1842 gt_cval_write(env, ri, GTIMER_PHYS, value);
1843 }
1844
1845 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1846 {
1847 return gt_tval_read(env, ri, GTIMER_PHYS);
1848 }
1849
1850 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1851 uint64_t value)
1852 {
1853 gt_tval_write(env, ri, GTIMER_PHYS, value);
1854 }
1855
1856 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1857 uint64_t value)
1858 {
1859 gt_ctl_write(env, ri, GTIMER_PHYS, value);
1860 }
1861
/* CNTV (virtual timer) accessors: thin wrappers binding GTIMER_VIRT
 * to the generic timer helpers.
 */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
1889
/* Write CNTVOFF_EL2. The virtual counter offset can change when the
 * virtual timer fires, so recalculate it after storing the new value.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
1899
/* CNTHP (Hyp/EL2 timer) accessors: thin wrappers binding GTIMER_HYP
 * to the generic timer helpers.
 */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
1927
/* CNTPS (secure physical timer) accessors: thin wrappers binding
 * GTIMER_SEC to the generic timer helpers.
 */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1955
/* QEMUTimer expiry callbacks: each recalculates the state of the
 * corresponding generic timer (opaque is the ARMCPU).
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
1983
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    /* The AArch32 CNTP_* registers are banked: the _S variants map to
     * the GTIMER_SEC state, the NS variants to GTIMER_PHYS.
     */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
2168
2169 #else
2170
2171 /* In user-mode most of the generic timer registers are inaccessible
2172 * however modern kernels (4.12+) allow access to cntvct_el0
2173 */
2174
/* Read of the virtual counter (CNTVCT_EL0) for user-mode emulation. */
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}
2183
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Constant, read-only frequency report */
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    /* The virtual counter, derived from the host clock */
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};
2198
2199 #endif
2200
2201 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2202 {
2203 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2204 raw_write(env, ri, value);
2205 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2206 raw_write(env, ri, value & 0xfffff6ff);
2207 } else {
2208 raw_write(env, ri, value & 0xfffff1ff);
2209 }
2210 }
2211
2212 #ifndef CONFIG_USER_ONLY
2213 /* get_phys_addr() isn't present for user-mode-only targets */
2214
2215 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2216 bool isread)
2217 {
2218 if (ri->opc2 & 4) {
2219 /* The ATS12NSO* operations must trap to EL3 if executed in
2220 * Secure EL1 (which can only happen if EL3 is AArch64).
2221 * They are simply UNDEF if executed from NS EL1.
2222 * They function normally from EL2 or EL3.
2223 */
2224 if (arm_current_el(env) == 1) {
2225 if (arm_is_secure_below_el3(env)) {
2226 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2227 }
2228 return CP_ACCESS_TRAP_UNCATEGORIZED;
2229 }
2230 }
2231 return CP_ACCESS_OK;
2232 }
2233
/* Perform an address translation of 'value' via the translation regime
 * selected by mmu_idx, and return the resulting PAR value in either the
 * 64-bit (LPAE) or 32-bit (short-descriptor) format as appropriate.
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    /* ret is true if the translation faulted; fi then holds the details */
    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * ATS1Hx always uses the 64bit format (not supported yet).
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & HCR_VM;
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            /* A 16MB page on v7 gets the supersection address layout */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
2321
/* AArch32 ATS operations: decode the translation regime to use from
 * the opcode fields and the current exception level, perform the
 * translation, and store the result in the current bank of the PAR.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* opc2 bit 0 distinguishes the write (W) from the read (R) variants */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
2379
2380 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2381 uint64_t value)
2382 {
2383 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2384 uint64_t par64;
2385
2386 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2387
2388 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2389 }
2390
2391 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2392 bool isread)
2393 {
2394 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2395 return CP_ACCESS_TRAP;
2396 }
2397 return CP_ACCESS_OK;
2398 }
2399
/* AArch64 AT S1Ex / AT S12Ex operations: decode opc1/opc2 into the
 * translation regime, perform the translation, and store the result
 * in PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* opc2 bit 0 distinguishes the write (W) from the read (R) variants */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
2438 #endif
2439
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    /* PAR: result destination for the ATS operations, banked S/NS */
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
2454
2455 /* Return basic MPU access permission bits. */
2456 static uint32_t simple_mpu_ap_bits(uint32_t val)
2457 {
2458 uint32_t ret;
2459 uint32_t mask;
2460 int i;
2461 ret = 0;
2462 mask = 3;
2463 for (i = 0; i < 16; i += 2) {
2464 ret |= (val >> i) & mask;
2465 mask <<= 2;
2466 }
2467 return ret;
2468 }
2469
2470 /* Pad basic MPU access permission bits to extended format. */
2471 static uint32_t extended_mpu_ap_bits(uint32_t val)
2472 {
2473 uint32_t ret;
2474 uint32_t mask;
2475 int i;
2476 ret = 0;
2477 mask = 3;
2478 for (i = 0; i < 16; i += 2) {
2479 ret |= (val & mask) << i;
2480 mask <<= 2;
2481 }
2482 return ret;
2483 }
2484
/* PMSAv5 data/insn access permission registers. The underlying state is
 * kept in the extended (4 bits per region) format; the legacy "simple"
 * views convert to and from it on access.
 */
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
2506
2507 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2508 {
2509 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2510
2511 if (!u32p) {
2512 return 0;
2513 }
2514
2515 u32p += env->pmsav7.rnr[M_REG_NS];
2516 return *u32p;
2517 }
2518
2519 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2520 uint64_t value)
2521 {
2522 ARMCPU *cpu = arm_env_get_cpu(env);
2523 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2524
2525 if (!u32p) {
2526 return;
2527 }
2528
2529 u32p += env->pmsav7.rnr[M_REG_NS];
2530 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2531 *u32p = value;
2532 }
2533
2534 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2535 uint64_t value)
2536 {
2537 ARMCPU *cpu = arm_env_get_cpu(env);
2538 uint32_t nrgs = cpu->pmsav7_dregion;
2539
2540 if (value >= nrgs) {
2541 qemu_log_mask(LOG_GUEST_ERROR,
2542 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2543 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2544 return;
2545 }
2546
2547 raw_write(env, ri, value);
2548 }
2549
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    /* DRBAR/DRSR/DRACR access the region selected by RGNR, so they are
     * ARM_CP_NO_RAW and go through the pmsav7_read/pmsav7_write helpers.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
2577
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    /* The "simple" AP views convert to/from the extended format via the
     * read/write functions; the *_EXT_AP views expose the same state raw.
     */
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
2628
/* Write TTBCR without doing a TLB flush: masks off bits that are
 * UNK/SBZP for the CPU's feature set, then stores the value and
 * precomputes the short-descriptor table masks for the bank written.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3); /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-desciptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
2661
2662 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2663 uint64_t value)
2664 {
2665 ARMCPU *cpu = arm_env_get_cpu(env);
2666
2667 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2668 /* With LPAE the TTBCR could result in a change of ASID
2669 * via the TTBCR.A1 bit, so do a TLB flush.
2670 */
2671 tlb_flush(CPU(cpu));
2672 }
2673 vmsa_ttbcr_raw_write(env, ri, value);
2674 }
2675
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    /* Matches the masks vmsa_ttbcr_raw_write() computes for value == 0 */
    tcr->base_mask = 0xffffc000u;
}
2687
/* Write TCR_EL1 (AArch64); flushes the TLB before storing the value. */
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
2698
2699 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2700 uint64_t value)
2701 {
2702 /* 64 bit accesses to the TTBRs can change the ASID and so we
2703 * must flush the TLB.
2704 */
2705 if (cpreg_field_is_64bit(ri)) {
2706 ARMCPU *cpu = arm_env_get_cpu(env);
2707
2708 tlb_flush(CPU(cpu));
2709 }
2710 raw_write(env, ri, value);
2711 }
2712
2713 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2714 uint64_t value)
2715 {
2716 ARMCPU *cpu = arm_env_get_cpu(env);
2717 CPUState *cs = CPU(cpu);
2718
2719 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
2720 if (raw_read(env, ri) != value) {
2721 tlb_flush_by_mmuidx(cs,
2722 ARMMMUIdxBit_S12NSE1 |
2723 ARMMMUIdxBit_S12NSE0 |
2724 ARMMMUIdxBit_S2NS);
2725 raw_write(env, ri, value);
2726 }
2727 }
2728
/* Fault status and fault address registers shared by VMSA and PMSA CPUs. */
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    /* DFSR/IFSR/DFAR are banked between Secure and Non-secure state */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
2748
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    /* Translation table base registers, banked Secure/Non-secure;
     * the writefn flushes the TLB on 64-bit writes (possible ASID change).
     */
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /* TTBCR aliases the low 32 bits of TCR_EL3 (Secure bank) or
     * TCR_EL1 (Non-secure bank).
     */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
2776
2777 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2778 uint64_t value)
2779 {
2780 env->cp15.c15_ticonfig = value & 0xe7;
2781 /* The OS_TYPE bit in this register changes the reported CPUID! */
2782 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2783 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2784 }
2785
2786 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2787 uint64_t value)
2788 {
2789 env->cp15.c15_threadid = value & 0xffff;
2790 }
2791
2792 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2793 uint64_t value)
2794 {
2795 /* Wait-for-interrupt (deprecated) */
2796 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2797 }
2798
2799 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2800 uint64_t value)
2801 {
2802 /* On OMAP there are registers indicating the max/min index of dcache lines
2803 * containing a dirty line; cache flush operations have to reset these.
2804 */
2805 env->cp15.c15_i_max = 0x000;
2806 env->cp15.c15_i_min = 0xff0;
2807 }
2808
/* Implementation-defined cp15 registers for OMAP/TI925T cores. */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    /* Overrides the generic DFSR; backed by the low half of esr_el[1]. */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    /* c15,c0: ignored (NOP on read and write). */
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    /* Writes are masked and may switch the reported CPUID; see
     * omap_ticonfig_write.
     */
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    /* Max/min dirty-dcache-line index registers; reset by
     * omap_cachemaint_write.
     */
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    /* Thread ID register; writes keep only the low 16 bits. */
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Reads as zero; writes trigger the (deprecated) wait-for-interrupt. */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    /* Whole crn=9 space: constant zero, overriding any generic entries. */
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2848
2849 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2850 uint64_t value)
2851 {
2852 env->cp15.c15_cpar = value & 0x3fff;
2853 }
2854
/* Implementation-defined cp15 registers for XScale cores. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    /* Coprocessor Access Register; writes are masked by xscale_cpar_write. */
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    /* Auxiliary control register: plain read/write state. */
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2881
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     * RAZ/WI is implemented here as a constant-zero register that
     * overrides any generic definitions for the same encodings.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2895
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean.
     * Read-only at PL1; modelled as a constant zero.
     */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2903
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress, so the
     * status register reads as constant zero.
     */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU.
     * All are 64-bit (MCRR/MRRC-style) accesses.
     */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2924
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     * Both are modelled as constant, non-raw, PL0-readable registers.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2937
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses: the whole crn=9 space is a constant
     * zero that overrides any generic definitions.
     */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2946
2947 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2948 {
2949 ARMCPU *cpu = arm_env_get_cpu(env);
2950 unsigned int cur_el = arm_current_el(env);
2951 bool secure = arm_is_secure(env);
2952
2953 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2954 return env->cp15.vpidr_el2;
2955 }
2956 return raw_read(env, ri);
2957 }
2958
2959 static uint64_t mpidr_read_val(CPUARMState *env)
2960 {
2961 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2962 uint64_t mpidr = cpu->mp_affinity;
2963
2964 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2965 mpidr |= (1U << 31);
2966 /* Cores which are uniprocessor (non-coherent)
2967 * but still implement the MP extensions set
2968 * bit 30. (For instance, Cortex-R5).
2969 */
2970 if (cpu->mp_is_up) {
2971 mpidr |= (1u << 30);
2972 }
2973 }
2974 return mpidr;
2975 }
2976
2977 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2978 {
2979 unsigned int cur_el = arm_current_el(env);
2980 bool secure = arm_is_secure(env);
2981
2982 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2983 return env->cp15.vmpidr_el2;
2984 }
2985 return mpidr_read_val(env);
2986 }
2987
2988 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2989 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2990 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2991 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },