target/arm: fix incorrect current EL bug in aarch32 exception emulation
[qemu.git] / target/arm/helper.c
1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "target/arm/idau.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/gdbstub.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/host-utils.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "hw/semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/tcg.h"
29 #include "qemu/range.h"
30 #include "qapi/qapi-commands-machine-target.h"
31 #include "qapi/error.h"
32 #include "qemu/guest-random.h"
33 #ifdef CONFIG_TCG
34 #include "arm_ldst.h"
35 #include "exec/cpu_ldst.h"
36 #endif
37
38 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
39
40 #ifndef CONFIG_USER_ONLY
41
42 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
43 MMUAccessType access_type, ARMMMUIdx mmu_idx,
44 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
45 target_ulong *page_size_ptr,
46 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
47 #endif
48
49 static void switch_mode(CPUARMState *env, int mode);
50
51 static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
52 {
53 ARMCPU *cpu = env_archcpu(env);
54 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
55
56 /* VFP data registers are always little-endian. */
57 if (reg < nregs) {
58 return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
59 }
60 if (arm_feature(env, ARM_FEATURE_NEON)) {
61 /* Aliases for Q regs. */
62 nregs += 16;
63 if (reg < nregs) {
64 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
65 return gdb_get_reg128(buf, q[0], q[1]);
66 }
67 }
68 switch (reg - nregs) {
69 case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
70 case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
71 case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
72 }
73 return 0;
74 }
75
76 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
77 {
78 ARMCPU *cpu = env_archcpu(env);
79 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
80
81 if (reg < nregs) {
82 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
83 return 8;
84 }
85 if (arm_feature(env, ARM_FEATURE_NEON)) {
86 nregs += 16;
87 if (reg < nregs) {
88 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
89 q[0] = ldq_le_p(buf);
90 q[1] = ldq_le_p(buf + 8);
91 return 16;
92 }
93 }
94 switch (reg - nregs) {
95 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
96 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
97 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
98 }
99 return 0;
100 }
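/*
 * Worked example of the index layout handled by the two accessors above
 * (a sketch, not part of the original file): on a CPU with 32 D registers
 * and Neon, regs 0..31 map to D0..D31 (8 bytes each), regs 32..47 alias
 * Q0..Q15 (16 bytes each), and regs 48/49/50 are FPSID/FPSCR/FPEXC
 * (4 bytes each). On a VFP-only CPU with 16 D registers and no Neon the
 * layout collapses to D0..D15 followed by FPSID/FPSCR/FPEXC at 16/17/18.
 */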
101
102 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
103 {
104 switch (reg) {
105 case 0 ... 31:
106 {
107 /* 128 bit FP register - quads are in LE order */
108 uint64_t *q = aa64_vfp_qreg(env, reg);
109 return gdb_get_reg128(buf, q[1], q[0]);
110 }
111 case 32:
112 /* FPSR */
113 return gdb_get_reg32(buf, vfp_get_fpsr(env));
114 case 33:
115 /* FPCR */
 116         return gdb_get_reg32(buf, vfp_get_fpcr(env));
117 default:
118 return 0;
119 }
120 }
121
122 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
123 {
124 switch (reg) {
125 case 0 ... 31:
126 /* 128 bit FP register */
127 {
128 uint64_t *q = aa64_vfp_qreg(env, reg);
129 q[0] = ldq_le_p(buf);
130 q[1] = ldq_le_p(buf + 8);
131 return 16;
132 }
133 case 32:
134 /* FPSR */
135 vfp_set_fpsr(env, ldl_p(buf));
136 return 4;
137 case 33:
138 /* FPCR */
139 vfp_set_fpcr(env, ldl_p(buf));
140 return 4;
141 default:
142 return 0;
143 }
144 }
145
146 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
147 {
148 assert(ri->fieldoffset);
149 if (cpreg_field_is_64bit(ri)) {
150 return CPREG_FIELD64(env, ri);
151 } else {
152 return CPREG_FIELD32(env, ri);
153 }
154 }
155
156 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
157 uint64_t value)
158 {
159 assert(ri->fieldoffset);
160 if (cpreg_field_is_64bit(ri)) {
161 CPREG_FIELD64(env, ri) = value;
162 } else {
163 CPREG_FIELD32(env, ri) = value;
164 }
165 }
166
167 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
168 {
169 return (char *)env + ri->fieldoffset;
170 }
171
172 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
173 {
174 /* Raw read of a coprocessor register (as needed for migration, etc). */
175 if (ri->type & ARM_CP_CONST) {
176 return ri->resetvalue;
177 } else if (ri->raw_readfn) {
178 return ri->raw_readfn(env, ri);
179 } else if (ri->readfn) {
180 return ri->readfn(env, ri);
181 } else {
182 return raw_read(env, ri);
183 }
184 }
185
186 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
187 uint64_t v)
188 {
189 /* Raw write of a coprocessor register (as needed for migration, etc).
190 * Note that constant registers are treated as write-ignored; the
191 * caller should check for success by whether a readback gives the
192 * value written.
193 */
194 if (ri->type & ARM_CP_CONST) {
195 return;
196 } else if (ri->raw_writefn) {
197 ri->raw_writefn(env, ri, v);
198 } else if (ri->writefn) {
199 ri->writefn(env, ri, v);
200 } else {
201 raw_write(env, ri, v);
202 }
203 }
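/*
 * Usage sketch for the convention described above (illustrative, not part
 * of the original file): a caller that needs to know whether a raw write
 * took effect does
 *
 *     write_raw_cp_reg(&cpu->env, ri, v);
 *     if (read_raw_cp_reg(&cpu->env, ri) != v) {
 *         ok = false;   (constant or read-only register rejected the value)
 *     }
 *
 * which is the pattern used by write_list_to_cpustate() further down.
 */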
204
205 /**
206 * arm_get/set_gdb_*: get/set a gdb register
207 * @env: the CPU state
208 * @buf: a buffer to copy to/from
209 * @reg: register number (offset from start of group)
210 *
211 * We return the number of bytes copied
212 */
213
214 static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
215 {
216 ARMCPU *cpu = env_archcpu(env);
217 const ARMCPRegInfo *ri;
218 uint32_t key;
219
220 key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
221 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
222 if (ri) {
223 if (cpreg_field_is_64bit(ri)) {
224 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
225 } else {
226 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
227 }
228 }
229 return 0;
230 }
231
232 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
233 {
234 return 0;
235 }
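/*
 * Illustrative sketch (an assumption, not part of this file) of how the
 * accessors above are wired into the gdbstub. In QEMU the registration is
 * done elsewhere (arm_cpu_register_gdb_regs_for_features()), roughly along
 * the lines of:
 *
 *     gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
 *                              cpu->dyn_sysreg_xml.num,
 *                              "system-registers.xml", 0);
 *
 * The callbacks receive the register index relative to the start of the
 * group and return the number of bytes copied, as documented above.
 */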
236
237 #ifdef TARGET_AARCH64
238 static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
239 {
240 ARMCPU *cpu = env_archcpu(env);
241
242 switch (reg) {
243 /* The first 32 registers are the zregs */
244 case 0 ... 31:
245 {
246 int vq, len = 0;
247 for (vq = 0; vq < cpu->sve_max_vq; vq++) {
248 len += gdb_get_reg128(buf,
249 env->vfp.zregs[reg].d[vq * 2 + 1],
250 env->vfp.zregs[reg].d[vq * 2]);
251 }
252 return len;
253 }
254 case 32:
255 return gdb_get_reg32(buf, vfp_get_fpsr(env));
256 case 33:
257 return gdb_get_reg32(buf, vfp_get_fpcr(env));
258 /* then 16 predicates and the ffr */
259 case 34 ... 50:
260 {
261 int preg = reg - 34;
262 int vq, len = 0;
263 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
264 len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
265 }
266 return len;
267 }
268 case 51:
269 {
270 /*
271 * We report in Vector Granules (VG) which is 64bit in a Z reg
272 * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
273 */
274 int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
275 return gdb_get_reg32(buf, vq * 2);
276 }
277 default:
 278     /* gdbstub asked for something out of our range */
279 qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
280 break;
281 }
282
283 return 0;
284 }
285
286 static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
287 {
288 ARMCPU *cpu = env_archcpu(env);
289
290 /* The first 32 registers are the zregs */
291 switch (reg) {
292 /* The first 32 registers are the zregs */
293 case 0 ... 31:
294 {
295 int vq, len = 0;
296 uint64_t *p = (uint64_t *) buf;
297 for (vq = 0; vq < cpu->sve_max_vq; vq++) {
298 env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
299 env->vfp.zregs[reg].d[vq * 2] = *p++;
300 len += 16;
301 }
302 return len;
303 }
304 case 32:
305 vfp_set_fpsr(env, *(uint32_t *)buf);
306 return 4;
307 case 33:
308 vfp_set_fpcr(env, *(uint32_t *)buf);
309 return 4;
310 case 34 ... 50:
311 {
312 int preg = reg - 34;
313 int vq, len = 0;
314 uint64_t *p = (uint64_t *) buf;
315 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
316 env->vfp.pregs[preg].p[vq / 4] = *p++;
317 len += 8;
318 }
319 return len;
320 }
321 case 51:
322 /* cannot set vg via gdbstub */
323 return 0;
324 default:
 325         /* gdbstub asked for something out of our range */
326 break;
327 }
328
329 return 0;
330 }
331 #endif /* TARGET_AARCH64 */
332
333 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
334 {
335 /* Return true if the regdef would cause an assertion if you called
336 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
337 * program bug for it not to have the NO_RAW flag).
338 * NB that returning false here doesn't necessarily mean that calling
339 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
340 * read/write access functions which are safe for raw use" from "has
341 * read/write access functions which have side effects but has forgotten
342 * to provide raw access functions".
343 * The tests here line up with the conditions in read/write_raw_cp_reg()
344 * and assertions in raw_read()/raw_write().
345 */
346 if ((ri->type & ARM_CP_CONST) ||
347 ri->fieldoffset ||
348 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
349 return false;
350 }
351 return true;
352 }
353
354 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
355 {
356 /* Write the coprocessor state from cpu->env to the (index,value) list. */
357 int i;
358 bool ok = true;
359
360 for (i = 0; i < cpu->cpreg_array_len; i++) {
361 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
362 const ARMCPRegInfo *ri;
363 uint64_t newval;
364
365 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
366 if (!ri) {
367 ok = false;
368 continue;
369 }
370 if (ri->type & ARM_CP_NO_RAW) {
371 continue;
372 }
373
374 newval = read_raw_cp_reg(&cpu->env, ri);
375 if (kvm_sync) {
376 /*
377 * Only sync if the previous list->cpustate sync succeeded.
378 * Rather than tracking the success/failure state for every
379 * item in the list, we just recheck "does the raw write we must
380 * have made in write_list_to_cpustate() read back OK" here.
381 */
382 uint64_t oldval = cpu->cpreg_values[i];
383
384 if (oldval == newval) {
385 continue;
386 }
387
388 write_raw_cp_reg(&cpu->env, ri, oldval);
389 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
390 continue;
391 }
392
393 write_raw_cp_reg(&cpu->env, ri, newval);
394 }
395 cpu->cpreg_values[i] = newval;
396 }
397 return ok;
398 }
399
400 bool write_list_to_cpustate(ARMCPU *cpu)
401 {
402 int i;
403 bool ok = true;
404
405 for (i = 0; i < cpu->cpreg_array_len; i++) {
406 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
407 uint64_t v = cpu->cpreg_values[i];
408 const ARMCPRegInfo *ri;
409
410 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
411 if (!ri) {
412 ok = false;
413 continue;
414 }
415 if (ri->type & ARM_CP_NO_RAW) {
416 continue;
417 }
418 /* Write value and confirm it reads back as written
419 * (to catch read-only registers and partially read-only
420 * registers where the incoming migration value doesn't match)
421 */
422 write_raw_cp_reg(&cpu->env, ri, v);
423 if (read_raw_cp_reg(&cpu->env, ri) != v) {
424 ok = false;
425 }
426 }
427 return ok;
428 }
429
430 static void add_cpreg_to_list(gpointer key, gpointer opaque)
431 {
432 ARMCPU *cpu = opaque;
433 uint64_t regidx;
434 const ARMCPRegInfo *ri;
435
436 regidx = *(uint32_t *)key;
437 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
438
439 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
440 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
441 /* The value array need not be initialized at this point */
442 cpu->cpreg_array_len++;
443 }
444 }
445
446 static void count_cpreg(gpointer key, gpointer opaque)
447 {
448 ARMCPU *cpu = opaque;
449 uint64_t regidx;
450 const ARMCPRegInfo *ri;
451
452 regidx = *(uint32_t *)key;
453 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
454
455 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
456 cpu->cpreg_array_len++;
457 }
458 }
459
460 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
461 {
462 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
463 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
464
465 if (aidx > bidx) {
466 return 1;
467 }
468 if (aidx < bidx) {
469 return -1;
470 }
471 return 0;
472 }
473
474 void init_cpreg_list(ARMCPU *cpu)
475 {
476 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
477 * Note that we require cpreg_tuples[] to be sorted by key ID.
478 */
479 GList *keys;
480 int arraylen;
481
482 keys = g_hash_table_get_keys(cpu->cp_regs);
483 keys = g_list_sort(keys, cpreg_key_compare);
484
485 cpu->cpreg_array_len = 0;
486
487 g_list_foreach(keys, count_cpreg, cpu);
488
489 arraylen = cpu->cpreg_array_len;
490 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
491 cpu->cpreg_values = g_new(uint64_t, arraylen);
492 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
493 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
494 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
495 cpu->cpreg_array_len = 0;
496
497 g_list_foreach(keys, add_cpreg_to_list, cpu);
498
499 assert(cpu->cpreg_array_len == arraylen);
500
501 g_list_free(keys);
502 }
503
504 /*
505 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
506 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
507 *
508 * access_el3_aa32ns: Used to check AArch32 register views.
509 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
510 */
511 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
512 const ARMCPRegInfo *ri,
513 bool isread)
514 {
515 bool secure = arm_is_secure_below_el3(env);
516
517 assert(!arm_el_is_aa64(env, 3));
518 if (secure) {
519 return CP_ACCESS_TRAP_UNCATEGORIZED;
520 }
521 return CP_ACCESS_OK;
522 }
523
524 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
525 const ARMCPRegInfo *ri,
526 bool isread)
527 {
528 if (!arm_el_is_aa64(env, 3)) {
529 return access_el3_aa32ns(env, ri, isread);
530 }
531 return CP_ACCESS_OK;
532 }
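/*
 * A minimal sketch (assumed, not taken verbatim from this file) of how the
 * two accessors above are attached to a register definition: an EL2
 * register that must read as a constant for non-secure accesses when EL2 is
 * absent, but be inaccessible from AArch32 secure state, might be described
 * as
 *
 *     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
 *       .cp = 15, .opc1 = 6, .crm = 2,
 *       .access = PL2_RW, .accessfn = access_el3_aa32ns,
 *       .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
 *
 * so a secure AArch32 access is rejected as UNCATEGORIZED while the
 * non-secure view still sees the constant zero register.
 */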
533
534 /* Some secure-only AArch32 registers trap to EL3 if used from
535 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
536 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
537 * We assume that the .access field is set to PL1_RW.
538 */
539 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
540 const ARMCPRegInfo *ri,
541 bool isread)
542 {
543 if (arm_current_el(env) == 3) {
544 return CP_ACCESS_OK;
545 }
546 if (arm_is_secure_below_el3(env)) {
547 return CP_ACCESS_TRAP_EL3;
548 }
549 /* This will be EL1 NS and EL2 NS, which just UNDEF */
550 return CP_ACCESS_TRAP_UNCATEGORIZED;
551 }
552
553 /* Check for traps to "powerdown debug" registers, which are controlled
554 * by MDCR.TDOSA
555 */
556 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
557 bool isread)
558 {
559 int el = arm_current_el(env);
560 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
561 (env->cp15.mdcr_el2 & MDCR_TDE) ||
562 (arm_hcr_el2_eff(env) & HCR_TGE);
563
564 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
565 return CP_ACCESS_TRAP_EL2;
566 }
567 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
568 return CP_ACCESS_TRAP_EL3;
569 }
570 return CP_ACCESS_OK;
571 }
572
573 /* Check for traps to "debug ROM" registers, which are controlled
574 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
575 */
576 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
577 bool isread)
578 {
579 int el = arm_current_el(env);
580 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
581 (env->cp15.mdcr_el2 & MDCR_TDE) ||
582 (arm_hcr_el2_eff(env) & HCR_TGE);
583
584 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
585 return CP_ACCESS_TRAP_EL2;
586 }
587 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
588 return CP_ACCESS_TRAP_EL3;
589 }
590 return CP_ACCESS_OK;
591 }
592
593 /* Check for traps to general debug registers, which are controlled
594 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
595 */
596 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
597 bool isread)
598 {
599 int el = arm_current_el(env);
600 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
601 (env->cp15.mdcr_el2 & MDCR_TDE) ||
602 (arm_hcr_el2_eff(env) & HCR_TGE);
603
604 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
605 return CP_ACCESS_TRAP_EL2;
606 }
607 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
608 return CP_ACCESS_TRAP_EL3;
609 }
610 return CP_ACCESS_OK;
611 }
612
613 /* Check for traps to performance monitor registers, which are controlled
614 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
615 */
616 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
617 bool isread)
618 {
619 int el = arm_current_el(env);
620
621 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
622 && !arm_is_secure_below_el3(env)) {
623 return CP_ACCESS_TRAP_EL2;
624 }
625 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
626 return CP_ACCESS_TRAP_EL3;
627 }
628 return CP_ACCESS_OK;
629 }
630
631 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
632 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
633 bool isread)
634 {
635 if (arm_current_el(env) == 1) {
636 uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
637 if (arm_hcr_el2_eff(env) & trap) {
638 return CP_ACCESS_TRAP_EL2;
639 }
640 }
641 return CP_ACCESS_OK;
642 }
643
644 /* Check for traps from EL1 due to HCR_EL2.TSW. */
645 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
646 bool isread)
647 {
648 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
649 return CP_ACCESS_TRAP_EL2;
650 }
651 return CP_ACCESS_OK;
652 }
653
654 /* Check for traps from EL1 due to HCR_EL2.TACR. */
655 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
656 bool isread)
657 {
658 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
659 return CP_ACCESS_TRAP_EL2;
660 }
661 return CP_ACCESS_OK;
662 }
663
664 /* Check for traps from EL1 due to HCR_EL2.TTLB. */
665 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
666 bool isread)
667 {
668 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
669 return CP_ACCESS_TRAP_EL2;
670 }
671 return CP_ACCESS_OK;
672 }
673
674 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
675 {
676 ARMCPU *cpu = env_archcpu(env);
677
678 raw_write(env, ri, value);
679 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
680 }
681
682 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
683 {
684 ARMCPU *cpu = env_archcpu(env);
685
686 if (raw_read(env, ri) != value) {
687 /* Unlike real hardware the qemu TLB uses virtual addresses,
688 * not modified virtual addresses, so this causes a TLB flush.
689 */
690 tlb_flush(CPU(cpu));
691 raw_write(env, ri, value);
692 }
693 }
694
695 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
696 uint64_t value)
697 {
698 ARMCPU *cpu = env_archcpu(env);
699
700 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
701 && !extended_addresses_enabled(env)) {
702 /* For VMSA (when not using the LPAE long descriptor page table
703 * format) this register includes the ASID, so do a TLB flush.
704 * For PMSA it is purely a process ID and no action is needed.
705 */
706 tlb_flush(CPU(cpu));
707 }
708 raw_write(env, ri, value);
709 }
710
711 /* IS variants of TLB operations must affect all cores */
712 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
713 uint64_t value)
714 {
715 CPUState *cs = env_cpu(env);
716
717 tlb_flush_all_cpus_synced(cs);
718 }
719
720 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
721 uint64_t value)
722 {
723 CPUState *cs = env_cpu(env);
724
725 tlb_flush_all_cpus_synced(cs);
726 }
727
728 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
729 uint64_t value)
730 {
731 CPUState *cs = env_cpu(env);
732
733 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
734 }
735
736 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
737 uint64_t value)
738 {
739 CPUState *cs = env_cpu(env);
740
741 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
742 }
743
744 /*
745 * Non-IS variants of TLB operations are upgraded to
746 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
747 * force broadcast of these operations.
748 */
749 static bool tlb_force_broadcast(CPUARMState *env)
750 {
751 return (env->cp15.hcr_el2 & HCR_FB) &&
 752         arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
753 }
754
755 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
756 uint64_t value)
757 {
758 /* Invalidate all (TLBIALL) */
759 CPUState *cs = env_cpu(env);
760
761 if (tlb_force_broadcast(env)) {
762 tlb_flush_all_cpus_synced(cs);
763 } else {
764 tlb_flush(cs);
765 }
766 }
767
768 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
769 uint64_t value)
770 {
771 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
772 CPUState *cs = env_cpu(env);
773
774 value &= TARGET_PAGE_MASK;
775 if (tlb_force_broadcast(env)) {
776 tlb_flush_page_all_cpus_synced(cs, value);
777 } else {
778 tlb_flush_page(cs, value);
779 }
780 }
781
782 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
783 uint64_t value)
784 {
785 /* Invalidate by ASID (TLBIASID) */
786 CPUState *cs = env_cpu(env);
787
788 if (tlb_force_broadcast(env)) {
789 tlb_flush_all_cpus_synced(cs);
790 } else {
791 tlb_flush(cs);
792 }
793 }
794
795 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
796 uint64_t value)
797 {
798 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
799 CPUState *cs = env_cpu(env);
800
801 value &= TARGET_PAGE_MASK;
802 if (tlb_force_broadcast(env)) {
803 tlb_flush_page_all_cpus_synced(cs, value);
804 } else {
805 tlb_flush_page(cs, value);
806 }
807 }
808
809 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
810 uint64_t value)
811 {
812 CPUState *cs = env_cpu(env);
813
814 tlb_flush_by_mmuidx(cs,
815 ARMMMUIdxBit_E10_1 |
816 ARMMMUIdxBit_E10_1_PAN |
817 ARMMMUIdxBit_E10_0 |
818 ARMMMUIdxBit_Stage2);
819 }
820
821 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
822 uint64_t value)
823 {
824 CPUState *cs = env_cpu(env);
825
826 tlb_flush_by_mmuidx_all_cpus_synced(cs,
827 ARMMMUIdxBit_E10_1 |
828 ARMMMUIdxBit_E10_1_PAN |
829 ARMMMUIdxBit_E10_0 |
830 ARMMMUIdxBit_Stage2);
831 }
832
833 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
834 uint64_t value)
835 {
836 /* Invalidate by IPA. This has to invalidate any structures that
837 * contain only stage 2 translation information, but does not need
838 * to apply to structures that contain combined stage 1 and stage 2
839 * translation information.
840 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
841 */
842 CPUState *cs = env_cpu(env);
843 uint64_t pageaddr;
844
845 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
846 return;
847 }
848
849 pageaddr = sextract64(value << 12, 0, 40);
850
851 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
852 }
853
854 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
855 uint64_t value)
856 {
857 CPUState *cs = env_cpu(env);
858 uint64_t pageaddr;
859
860 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
861 return;
862 }
863
864 pageaddr = sextract64(value << 12, 0, 40);
865
866 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
867 ARMMMUIdxBit_Stage2);
868 }
869
870 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
871 uint64_t value)
872 {
873 CPUState *cs = env_cpu(env);
874
875 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
876 }
877
878 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
879 uint64_t value)
880 {
881 CPUState *cs = env_cpu(env);
882
883 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
884 }
885
886 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
887 uint64_t value)
888 {
889 CPUState *cs = env_cpu(env);
890 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
891
892 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
893 }
894
895 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
896 uint64_t value)
897 {
898 CPUState *cs = env_cpu(env);
899 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
900
901 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
902 ARMMMUIdxBit_E2);
903 }
904
905 static const ARMCPRegInfo cp_reginfo[] = {
906 /* Define the secure and non-secure FCSE identifier CP registers
907 * separately because there is no secure bank in V8 (no _EL3). This allows
908 * the secure register to be properly reset and migrated. There is also no
909 * v8 EL1 version of the register so the non-secure instance stands alone.
910 */
911 { .name = "FCSEIDR",
912 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
913 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
914 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
915 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
916 { .name = "FCSEIDR_S",
917 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
918 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
919 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
920 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
921 /* Define the secure and non-secure context identifier CP registers
922 * separately because there is no secure bank in V8 (no _EL3). This allows
923 * the secure register to be properly reset and migrated. In the
924 * non-secure case, the 32-bit register will have reset and migration
925 * disabled during registration as it is handled by the 64-bit instance.
926 */
927 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
928 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
929 .access = PL1_RW, .accessfn = access_tvm_trvm,
930 .secure = ARM_CP_SECSTATE_NS,
931 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
932 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
933 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
934 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
935 .access = PL1_RW, .accessfn = access_tvm_trvm,
936 .secure = ARM_CP_SECSTATE_S,
937 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
938 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
939 REGINFO_SENTINEL
940 };
941
942 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
943 /* NB: Some of these registers exist in v8 but with more precise
944 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
945 */
946 /* MMU Domain access control / MPU write buffer control */
947 { .name = "DACR",
948 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
949 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
950 .writefn = dacr_write, .raw_writefn = raw_write,
951 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
952 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
953 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
954 * For v6 and v5, these mappings are overly broad.
955 */
956 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
957 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
958 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
959 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
960 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
961 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
962 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
963 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
964 /* Cache maintenance ops; some of this space may be overridden later. */
965 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
966 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
967 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
968 REGINFO_SENTINEL
969 };
970
971 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
972 /* Not all pre-v6 cores implemented this WFI, so this is slightly
973 * over-broad.
974 */
975 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
976 .access = PL1_W, .type = ARM_CP_WFI },
977 REGINFO_SENTINEL
978 };
979
980 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
981 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
982 * is UNPREDICTABLE; we choose to NOP as most implementations do).
983 */
984 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
985 .access = PL1_W, .type = ARM_CP_WFI },
986 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
987 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
988 * OMAPCP will override this space.
989 */
990 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
991 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
992 .resetvalue = 0 },
993 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
994 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
995 .resetvalue = 0 },
996 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
997 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
998 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
999 .resetvalue = 0 },
1000 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
1001 * implementing it as RAZ means the "debug architecture version" bits
1002 * will read as a reserved value, which should cause Linux to not try
1003 * to use the debug hardware.
1004 */
1005 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
1006 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1007 /* MMU TLB control. Note that the wildcarding means we cover not just
1008 * the unified TLB ops but also the dside/iside/inner-shareable variants.
1009 */
1010 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
1011 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
1012 .type = ARM_CP_NO_RAW },
1013 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
1014 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
1015 .type = ARM_CP_NO_RAW },
1016 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
1017 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
1018 .type = ARM_CP_NO_RAW },
1019 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
1020 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
1021 .type = ARM_CP_NO_RAW },
1022 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
1023 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
1024 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
1025 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
1026 REGINFO_SENTINEL
1027 };
1028
1029 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1030 uint64_t value)
1031 {
1032 uint32_t mask = 0;
1033
1034 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
1035 if (!arm_feature(env, ARM_FEATURE_V8)) {
1036 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
1037 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
1038 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
1039 */
1040 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
1041 /* VFP coprocessor: cp10 & cp11 [23:20] */
1042 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
1043
1044 if (!arm_feature(env, ARM_FEATURE_NEON)) {
1045 /* ASEDIS [31] bit is RAO/WI */
1046 value |= (1 << 31);
1047 }
1048
1049 /* VFPv3 and upwards with NEON implement 32 double precision
1050 * registers (D0-D31).
1051 */
1052 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
1053 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
1054 value |= (1 << 30);
1055 }
1056 }
1057 value &= mask;
1058 }
1059
1060 /*
1061 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1062 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1063 */
1064 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1065 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1066 value &= ~(0xf << 20);
1067 value |= env->cp15.cpacr_el1 & (0xf << 20);
1068 }
1069
1070 env->cp15.cpacr_el1 = value;
1071 }
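/*
 * Worked example of the masking above (a sketch under stated CPU
 * assumptions): on a v7 CPU with VFPv3-D16 and no Neon, a guest write of
 * 0x00F00000 (enable cp10/cp11 at PL0 and PL1) is first OR-ed with the
 * RAO/WI bits ASEDIS [31] and D32DIS [30], then AND-ed with the mask
 * (1 << 31) | (1 << 30) | (0xf << 20), so CPACR reads back as 0xC0F00000.
 * A CPU with no VFP/SIMD at all leaves mask == 0 and the register reads as
 * zero.
 */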
1072
1073 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1074 {
1075 /*
1076 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1077 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1078 */
1079 uint64_t value = env->cp15.cpacr_el1;
1080
1081 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1082 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1083 value &= ~(0xf << 20);
1084 }
1085 return value;
1086 }
1087
1088
1089 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1090 {
1091 /* Call cpacr_write() so that we reset with the correct RAO bits set
1092 * for our CPU features.
1093 */
1094 cpacr_write(env, ri, 0);
1095 }
1096
1097 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1098 bool isread)
1099 {
1100 if (arm_feature(env, ARM_FEATURE_V8)) {
1101 /* Check if CPACR accesses are to be trapped to EL2 */
1102 if (arm_current_el(env) == 1 &&
1103 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
1104 return CP_ACCESS_TRAP_EL2;
1105 /* Check if CPACR accesses are to be trapped to EL3 */
1106 } else if (arm_current_el(env) < 3 &&
1107 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1108 return CP_ACCESS_TRAP_EL3;
1109 }
1110 }
1111
1112 return CP_ACCESS_OK;
1113 }
1114
1115 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1116 bool isread)
1117 {
1118 /* Check if CPTR accesses are set to trap to EL3 */
1119 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1120 return CP_ACCESS_TRAP_EL3;
1121 }
1122
1123 return CP_ACCESS_OK;
1124 }
1125
1126 static const ARMCPRegInfo v6_cp_reginfo[] = {
1127 /* prefetch by MVA in v6, NOP in v7 */
1128 { .name = "MVA_prefetch",
1129 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
1130 .access = PL1_W, .type = ARM_CP_NOP },
1131 /* We need to break the TB after ISB to execute self-modifying code
1132 * correctly and also to take any pending interrupts immediately.
1133 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
1134 */
1135 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
1136 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
1137 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
1138 .access = PL0_W, .type = ARM_CP_NOP },
1139 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
1140 .access = PL0_W, .type = ARM_CP_NOP },
1141 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
1142 .access = PL1_RW, .accessfn = access_tvm_trvm,
1143 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1144 offsetof(CPUARMState, cp15.ifar_ns) },
1145 .resetvalue = 0, },
1146 /* Watchpoint Fault Address Register : should actually only be present
1147 * for 1136, 1176, 11MPCore.
1148 */
1149 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1150 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1151 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1152 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1153 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1154 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1155 REGINFO_SENTINEL
1156 };
1157
1158 /* Definitions for the PMU registers */
1159 #define PMCRN_MASK 0xf800
1160 #define PMCRN_SHIFT 11
1161 #define PMCRLC 0x40
1162 #define PMCRDP 0x20
1163 #define PMCRX 0x10
1164 #define PMCRD 0x8
1165 #define PMCRC 0x4
1166 #define PMCRP 0x2
1167 #define PMCRE 0x1
1168 /*
1169 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
1170 * which can be written as 1 to trigger behaviour but which stay RAZ).
1171 */
1172 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1173
1174 #define PMXEVTYPER_P 0x80000000
1175 #define PMXEVTYPER_U 0x40000000
1176 #define PMXEVTYPER_NSK 0x20000000
1177 #define PMXEVTYPER_NSU 0x10000000
1178 #define PMXEVTYPER_NSH 0x08000000
1179 #define PMXEVTYPER_M 0x04000000
1180 #define PMXEVTYPER_MT 0x02000000
1181 #define PMXEVTYPER_EVTCOUNT 0x0000ffff
1182 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1183 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1184 PMXEVTYPER_M | PMXEVTYPER_MT | \
1185 PMXEVTYPER_EVTCOUNT)
1186
1187 #define PMCCFILTR 0xf8000000
1188 #define PMCCFILTR_M PMXEVTYPER_M
1189 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1190
1191 static inline uint32_t pmu_num_counters(CPUARMState *env)
1192 {
1193 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1194 }
1195
1196 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1197 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1198 {
1199 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1200 }
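/*
 * Worked example for the two helpers above (not part of the original file):
 * with PMCR.N == 4 (extracted from bits [15:11] via PMCRN_MASK/PMCRN_SHIFT),
 * pmu_counter_mask() returns (1 << 31) | 0xf, i.e. the cycle counter bit
 * plus one bit per implemented event counter; writes to PMCNTENSET/CLR and
 * PMINTENSET/CLR are masked with this value before being applied.
 */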
1201
1202 typedef struct pm_event {
1203 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1204 /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1205 bool (*supported)(CPUARMState *);
1206 /*
1207 * Retrieve the current count of the underlying event. The programmed
1208 * counters hold a difference from the return value from this function
1209 */
1210 uint64_t (*get_count)(CPUARMState *);
1211 /*
1212 * Return how many nanoseconds it will take (at a minimum) for count events
1213 * to occur. A negative value indicates the counter will never overflow, or
1214 * that the counter has otherwise arranged for the overflow bit to be set
1215 * and the PMU interrupt to be raised on overflow.
1216 */
1217 int64_t (*ns_per_count)(uint64_t);
1218 } pm_event;
1219
1220 static bool event_always_supported(CPUARMState *env)
1221 {
1222 return true;
1223 }
1224
1225 static uint64_t swinc_get_count(CPUARMState *env)
1226 {
1227 /*
1228 * SW_INCR events are written directly to the pmevcntr's by writes to
1229 * PMSWINC, so there is no underlying count maintained by the PMU itself
1230 */
1231 return 0;
1232 }
1233
1234 static int64_t swinc_ns_per(uint64_t ignored)
1235 {
1236 return -1;
1237 }
1238
1239 /*
1240 * Return the underlying cycle count for the PMU cycle counters. If we're in
1241 * usermode, simply return 0.
1242 */
1243 static uint64_t cycles_get_count(CPUARMState *env)
1244 {
1245 #ifndef CONFIG_USER_ONLY
1246 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1247 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1248 #else
1249 return cpu_get_host_ticks();
1250 #endif
1251 }
1252
1253 #ifndef CONFIG_USER_ONLY
1254 static int64_t cycles_ns_per(uint64_t cycles)
1255 {
1256 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1257 }
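/*
 * Note (worked example, not in the original file): with the fixed
 * ARM_CPU_FREQ of 1 GHz, ARM_CPU_FREQ / NANOSECONDS_PER_SECOND == 1, so one
 * emulated cycle corresponds to one nanosecond of QEMU_CLOCK_VIRTUAL time;
 * for instance the 2^32 cycles left before a 32-bit PMCCNTR overflow
 * translate to roughly 4.29 seconds of virtual time.
 */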
1258
1259 static bool instructions_supported(CPUARMState *env)
1260 {
1261 return use_icount == 1 /* Precise instruction counting */;
1262 }
1263
1264 static uint64_t instructions_get_count(CPUARMState *env)
1265 {
1266 return (uint64_t)cpu_get_icount_raw();
1267 }
1268
1269 static int64_t instructions_ns_per(uint64_t icount)
1270 {
1271 return cpu_icount_to_ns((int64_t)icount);
1272 }
1273 #endif
1274
1275 static bool pmu_8_1_events_supported(CPUARMState *env)
1276 {
1277 /* For events which are supported in any v8.1 PMU */
1278 return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
1279 }
1280
1281 static bool pmu_8_4_events_supported(CPUARMState *env)
1282 {
1283     /* For events which are supported in any v8.4 PMU */
1284 return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
1285 }
1286
1287 static uint64_t zero_event_get_count(CPUARMState *env)
1288 {
1289 /* For events which on QEMU never fire, so their count is always zero */
1290 return 0;
1291 }
1292
1293 static int64_t zero_event_ns_per(uint64_t cycles)
1294 {
1295 /* An event which never fires can never overflow */
1296 return -1;
1297 }
1298
1299 static const pm_event pm_events[] = {
1300 { .number = 0x000, /* SW_INCR */
1301 .supported = event_always_supported,
1302 .get_count = swinc_get_count,
1303 .ns_per_count = swinc_ns_per,
1304 },
1305 #ifndef CONFIG_USER_ONLY
1306 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1307 .supported = instructions_supported,
1308 .get_count = instructions_get_count,
1309 .ns_per_count = instructions_ns_per,
1310 },
1311 { .number = 0x011, /* CPU_CYCLES, Cycle */
1312 .supported = event_always_supported,
1313 .get_count = cycles_get_count,
1314 .ns_per_count = cycles_ns_per,
1315 },
1316 #endif
1317 { .number = 0x023, /* STALL_FRONTEND */
1318 .supported = pmu_8_1_events_supported,
1319 .get_count = zero_event_get_count,
1320 .ns_per_count = zero_event_ns_per,
1321 },
1322 { .number = 0x024, /* STALL_BACKEND */
1323 .supported = pmu_8_1_events_supported,
1324 .get_count = zero_event_get_count,
1325 .ns_per_count = zero_event_ns_per,
1326 },
1327 { .number = 0x03c, /* STALL */
1328 .supported = pmu_8_4_events_supported,
1329 .get_count = zero_event_get_count,
1330 .ns_per_count = zero_event_ns_per,
1331 },
1332 };
1333
1334 /*
1335 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1336 * events (i.e. the statistical profiling extension), this implementation
1337 * should first be updated to something sparse instead of the current
1338 * supported_event_map[] array.
1339 */
1340 #define MAX_EVENT_ID 0x3c
1341 #define UNSUPPORTED_EVENT UINT16_MAX
1342 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1343
1344 /*
1345 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1346 * of ARM event numbers to indices in our pm_events array.
1347 *
1348 * Note: Events in the 0x40XX range are not currently supported.
1349 */
1350 void pmu_init(ARMCPU *cpu)
1351 {
1352 unsigned int i;
1353
1354 /*
1355 * Empty supported_event_map and cpu->pmceid[01] before adding supported
1356 * events to them
1357 */
1358 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1359 supported_event_map[i] = UNSUPPORTED_EVENT;
1360 }
1361 cpu->pmceid0 = 0;
1362 cpu->pmceid1 = 0;
1363
1364 for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1365 const pm_event *cnt = &pm_events[i];
1366 assert(cnt->number <= MAX_EVENT_ID);
1367 /* We do not currently support events in the 0x40xx range */
1368 assert(cnt->number <= 0x3f);
1369
1370 if (cnt->supported(&cpu->env)) {
1371 supported_event_map[cnt->number] = i;
1372 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1373 if (cnt->number & 0x20) {
1374 cpu->pmceid1 |= event_mask;
1375 } else {
1376 cpu->pmceid0 |= event_mask;
1377 }
1378 }
1379 }
1380 }
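/*
 * Worked example of the mapping built above (a sketch, not from the
 * original file): if STALL_FRONTEND (0x023) is supported,
 * supported_event_map[0x023] holds its index into pm_events[], and because
 * bit 5 (0x20) of the event number is set the event is advertised in
 * PMCEID1 at bit position 0x023 & 0x1f == 3; CPU_CYCLES (0x011) would
 * instead set bit 17 of PMCEID0.
 */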
1381
1382 /*
1383 * Check at runtime whether a PMU event is supported for the current machine
1384 */
1385 static bool event_supported(uint16_t number)
1386 {
1387 if (number > MAX_EVENT_ID) {
1388 return false;
1389 }
1390 return supported_event_map[number] != UNSUPPORTED_EVENT;
1391 }
1392
1393 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1394 bool isread)
1395 {
1396 /* Performance monitor registers user accessibility is controlled
1397 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1398 * trapping to EL2 or EL3 for other accesses.
1399 */
1400 int el = arm_current_el(env);
1401
1402 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1403 return CP_ACCESS_TRAP;
1404 }
1405 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1406 && !arm_is_secure_below_el3(env)) {
1407 return CP_ACCESS_TRAP_EL2;
1408 }
1409 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1410 return CP_ACCESS_TRAP_EL3;
1411 }
1412
1413 return CP_ACCESS_OK;
1414 }
1415
1416 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1417 const ARMCPRegInfo *ri,
1418 bool isread)
1419 {
1420 /* ER: event counter read trap control */
1421 if (arm_feature(env, ARM_FEATURE_V8)
1422 && arm_current_el(env) == 0
1423 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1424 && isread) {
1425 return CP_ACCESS_OK;
1426 }
1427
1428 return pmreg_access(env, ri, isread);
1429 }
1430
1431 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1432 const ARMCPRegInfo *ri,
1433 bool isread)
1434 {
1435 /* SW: software increment write trap control */
1436 if (arm_feature(env, ARM_FEATURE_V8)
1437 && arm_current_el(env) == 0
1438 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1439 && !isread) {
1440 return CP_ACCESS_OK;
1441 }
1442
1443 return pmreg_access(env, ri, isread);
1444 }
1445
1446 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1447 const ARMCPRegInfo *ri,
1448 bool isread)
1449 {
1450 /* ER: event counter read trap control */
1451 if (arm_feature(env, ARM_FEATURE_V8)
1452 && arm_current_el(env) == 0
1453 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1454 return CP_ACCESS_OK;
1455 }
1456
1457 return pmreg_access(env, ri, isread);
1458 }
1459
1460 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1461 const ARMCPRegInfo *ri,
1462 bool isread)
1463 {
1464 /* CR: cycle counter read trap control */
1465 if (arm_feature(env, ARM_FEATURE_V8)
1466 && arm_current_el(env) == 0
1467 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1468 && isread) {
1469 return CP_ACCESS_OK;
1470 }
1471
1472 return pmreg_access(env, ri, isread);
1473 }
1474
1475 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1476 * the current EL, security state, and register configuration.
1477 */
1478 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1479 {
1480 uint64_t filter;
1481 bool e, p, u, nsk, nsu, nsh, m;
1482 bool enabled, prohibited, filtered;
1483 bool secure = arm_is_secure(env);
1484 int el = arm_current_el(env);
1485 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1486
1487 if (!arm_feature(env, ARM_FEATURE_PMU)) {
1488 return false;
1489 }
1490
1491 if (!arm_feature(env, ARM_FEATURE_EL2) ||
1492 (counter < hpmn || counter == 31)) {
1493 e = env->cp15.c9_pmcr & PMCRE;
1494 } else {
1495 e = env->cp15.mdcr_el2 & MDCR_HPME;
1496 }
1497 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1498
1499 if (!secure) {
1500 if (el == 2 && (counter < hpmn || counter == 31)) {
1501 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1502 } else {
1503 prohibited = false;
1504 }
1505 } else {
1506 prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1507            !(env->cp15.mdcr_el3 & MDCR_SPME);
1508 }
1509
1510 if (prohibited && counter == 31) {
1511 prohibited = env->cp15.c9_pmcr & PMCRDP;
1512 }
1513
1514 if (counter == 31) {
1515 filter = env->cp15.pmccfiltr_el0;
1516 } else {
1517 filter = env->cp15.c14_pmevtyper[counter];
1518 }
1519
1520 p = filter & PMXEVTYPER_P;
1521 u = filter & PMXEVTYPER_U;
1522 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1523 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1524 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1525 m = arm_el_is_aa64(env, 1) &&
1526 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1527
1528 if (el == 0) {
1529 filtered = secure ? u : u != nsu;
1530 } else if (el == 1) {
1531 filtered = secure ? p : p != nsk;
1532 } else if (el == 2) {
1533 filtered = !nsh;
1534 } else { /* EL3 */
1535 filtered = m != p;
1536 }
1537
1538 if (counter != 31) {
1539 /*
1540          * If not checking PMCCNTR, ensure the counter is set up to count an
1541          * event we support
1542 */
1543 uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1544 if (!event_supported(event)) {
1545 return false;
1546 }
1547 }
1548
1549 return enabled && !prohibited && !filtered;
1550 }
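/*
 * Worked filter example for the function above (illustrative only): at
 * non-secure EL0 on a CPU with EL3, the code computes filtered = (u != nsu),
 * i.e. the event is filtered out exactly when one of PMEVTYPER.U
 * ("exclude EL0") and PMEVTYPER.NSU ("exclude non-secure EL0") is set and
 * the other is clear; with both clear or both set the counter keeps
 * counting, provided it is also enabled in PMCNTENSET and not prohibited by
 * the MDCR controls checked earlier.
 */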
1551
1552 static void pmu_update_irq(CPUARMState *env)
1553 {
1554 ARMCPU *cpu = env_archcpu(env);
1555 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1556 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1557 }
1558
1559 /*
1560 * Ensure c15_ccnt is the guest-visible count so that operations such as
1561 * enabling/disabling the counter or filtering, modifying the count itself,
1562 * etc. can be done logically. This is essentially a no-op if the counter is
1563 * not enabled at the time of the call.
1564 */
1565 static void pmccntr_op_start(CPUARMState *env)
1566 {
1567 uint64_t cycles = cycles_get_count(env);
1568
1569 if (pmu_counter_enabled(env, 31)) {
1570 uint64_t eff_cycles = cycles;
1571 if (env->cp15.c9_pmcr & PMCRD) {
1572 /* Increment once every 64 processor clock cycles */
1573 eff_cycles /= 64;
1574 }
1575
1576 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1577
1578 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
1579 1ull << 63 : 1ull << 31;
1580 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1581 env->cp15.c9_pmovsr |= (1 << 31);
1582 pmu_update_irq(env);
1583 }
1584
1585 env->cp15.c15_ccnt = new_pmccntr;
1586 }
1587 env->cp15.c15_ccnt_delta = cycles;
1588 }
1589
1590 /*
1591 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1592 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1593 * pmccntr_op_start.
1594 */
1595 static void pmccntr_op_finish(CPUARMState *env)
1596 {
1597 if (pmu_counter_enabled(env, 31)) {
1598 #ifndef CONFIG_USER_ONLY
1599 /* Calculate when the counter will next overflow */
1600 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1601 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1602 remaining_cycles = (uint32_t)remaining_cycles;
1603 }
1604 int64_t overflow_in = cycles_ns_per(remaining_cycles);
1605
1606 if (overflow_in > 0) {
1607 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1608 overflow_in;
1609 ARMCPU *cpu = env_archcpu(env);
1610 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1611 }
1612 #endif
1613
1614 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1615 if (env->cp15.c9_pmcr & PMCRD) {
1616 /* Increment once every 64 processor clock cycles */
1617 prev_cycles /= 64;
1618 }
1619 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1620 }
1621 }
1622
1623 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1624 {
1625
1626 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1627 uint64_t count = 0;
1628 if (event_supported(event)) {
1629 uint16_t event_idx = supported_event_map[event];
1630 count = pm_events[event_idx].get_count(env);
1631 }
1632
1633 if (pmu_counter_enabled(env, counter)) {
1634 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1635
1636 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1637 env->cp15.c9_pmovsr |= (1 << counter);
1638 pmu_update_irq(env);
1639 }
1640 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1641 }
1642 env->cp15.c14_pmevcntr_delta[counter] = count;
1643 }
1644
1645 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1646 {
1647 if (pmu_counter_enabled(env, counter)) {
1648 #ifndef CONFIG_USER_ONLY
1649 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1650 uint16_t event_idx = supported_event_map[event];
1651 uint64_t delta = UINT32_MAX -
1652 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1653 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1654
1655 if (overflow_in > 0) {
1656 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1657 overflow_in;
1658 ARMCPU *cpu = env_archcpu(env);
1659 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1660 }
1661 #endif
1662
1663 env->cp15.c14_pmevcntr_delta[counter] -=
1664 env->cp15.c14_pmevcntr[counter];
1665 }
1666 }
1667
1668 void pmu_op_start(CPUARMState *env)
1669 {
1670 unsigned int i;
1671 pmccntr_op_start(env);
1672 for (i = 0; i < pmu_num_counters(env); i++) {
1673 pmevcntr_op_start(env, i);
1674 }
1675 }
1676
1677 void pmu_op_finish(CPUARMState *env)
1678 {
1679 unsigned int i;
1680 pmccntr_op_finish(env);
1681 for (i = 0; i < pmu_num_counters(env); i++) {
1682 pmevcntr_op_finish(env, i);
1683 }
1684 }
1685
1686 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1687 {
1688 pmu_op_start(&cpu->env);
1689 }
1690
1691 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1692 {
1693 pmu_op_finish(&cpu->env);
1694 }
1695
1696 void arm_pmu_timer_cb(void *opaque)
1697 {
1698 ARMCPU *cpu = opaque;
1699
1700 /*
1701 * Update all the counter values based on the current underlying counts,
1702 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1703 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1704 * counter may expire.
1705 */
1706 pmu_op_start(&cpu->env);
1707 pmu_op_finish(&cpu->env);
1708 }
1709
1710 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1711 uint64_t value)
1712 {
1713 pmu_op_start(env);
1714
1715 if (value & PMCRC) {
1716 /* The counter has been reset */
1717 env->cp15.c15_ccnt = 0;
1718 }
1719
1720 if (value & PMCRP) {
1721 unsigned int i;
1722 for (i = 0; i < pmu_num_counters(env); i++) {
1723 env->cp15.c14_pmevcntr[i] = 0;
1724 }
1725 }
1726
1727 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
1728 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
1729
1730 pmu_op_finish(env);
1731 }
1732
1733 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1734 uint64_t value)
1735 {
1736 unsigned int i;
1737 for (i = 0; i < pmu_num_counters(env); i++) {
1738 /* Increment a counter's count iff: */
1739 if ((value & (1 << i)) && /* counter's bit is set */
1740 /* counter is enabled and not filtered */
1741 pmu_counter_enabled(env, i) &&
1742 /* counter is SW_INCR */
1743 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1744 pmevcntr_op_start(env, i);
1745
1746 /*
1747 * Detect if this write causes an overflow since we can't predict
1748 * PMSWINC overflows like we can for other events
1749 */
1750 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1751
1752 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1753 env->cp15.c9_pmovsr |= (1 << i);
1754 pmu_update_irq(env);
1755 }
1756
1757 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1758
1759 pmevcntr_op_finish(env, i);
1760 }
1761 }
1762 }
1763
1764 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1765 {
1766 uint64_t ret;
1767 pmccntr_op_start(env);
1768 ret = env->cp15.c15_ccnt;
1769 pmccntr_op_finish(env);
1770 return ret;
1771 }
1772
1773 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1774 uint64_t value)
1775 {
1776 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1777 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1778 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1779 * accessed.
1780 */
1781 env->cp15.c9_pmselr = value & 0x1f;
1782 }
1783
1784 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1785 uint64_t value)
1786 {
1787 pmccntr_op_start(env);
1788 env->cp15.c15_ccnt = value;
1789 pmccntr_op_finish(env);
1790 }
1791
1792 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1793 uint64_t value)
1794 {
1795 uint64_t cur_val = pmccntr_read(env, NULL);
1796
1797 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1798 }
1799
1800 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1801 uint64_t value)
1802 {
1803 pmccntr_op_start(env);
1804 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1805 pmccntr_op_finish(env);
1806 }
1807
1808 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1809 uint64_t value)
1810 {
1811 pmccntr_op_start(env);
1812 /* M is not accessible from AArch32 */
1813 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1814 (value & PMCCFILTR);
1815 pmccntr_op_finish(env);
1816 }
1817
1818 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1819 {
1820 /* M is not visible in AArch32 */
1821 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1822 }
1823
1824 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1825 uint64_t value)
1826 {
1827 value &= pmu_counter_mask(env);
1828 env->cp15.c9_pmcnten |= value;
1829 }
1830
1831 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1832 uint64_t value)
1833 {
1834 value &= pmu_counter_mask(env);
1835 env->cp15.c9_pmcnten &= ~value;
1836 }
1837
1838 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1839 uint64_t value)
1840 {
1841 value &= pmu_counter_mask(env);
1842 env->cp15.c9_pmovsr &= ~value;
1843 pmu_update_irq(env);
1844 }
1845
1846 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1847 uint64_t value)
1848 {
1849 value &= pmu_counter_mask(env);
1850 env->cp15.c9_pmovsr |= value;
1851 pmu_update_irq(env);
1852 }
1853
1854 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1855 uint64_t value, const uint8_t counter)
1856 {
1857 if (counter == 31) {
1858 pmccfiltr_write(env, ri, value);
1859 } else if (counter < pmu_num_counters(env)) {
1860 pmevcntr_op_start(env, counter);
1861
1862 /*
1863 * If this counter's event type is changing, store the current
1864 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1865 * pmevcntr_op_finish has the correct baseline when it converts back to
1866 * a delta.
1867 */
1868 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1869 PMXEVTYPER_EVTCOUNT;
1870 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1871 if (old_event != new_event) {
1872 uint64_t count = 0;
1873 if (event_supported(new_event)) {
1874 uint16_t event_idx = supported_event_map[new_event];
1875 count = pm_events[event_idx].get_count(env);
1876 }
1877 env->cp15.c14_pmevcntr_delta[counter] = count;
1878 }
1879
1880 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1881 pmevcntr_op_finish(env, counter);
1882 }
1883 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1884 * PMSELR value is equal to or greater than the number of implemented
1885 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1886 */
1887 }
1888
1889 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1890 const uint8_t counter)
1891 {
1892 if (counter == 31) {
1893 return env->cp15.pmccfiltr_el0;
1894 } else if (counter < pmu_num_counters(env)) {
1895 return env->cp15.c14_pmevtyper[counter];
1896 } else {
1897 /*
1898 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1899 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1900 */
1901 return 0;
1902 }
1903 }
1904
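/*
 * For PMEVTYPER<n> and PMEVCNTR<n> the counter index n is encoded in the
 * register encoding as CRm[1:0]:opc2[2:0]; the *_writefn, *_readfn and
 * *_raw* helpers below decode it from the reginfo fields.
 */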
1905 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1906 uint64_t value)
1907 {
1908 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1909 pmevtyper_write(env, ri, value, counter);
1910 }
1911
1912 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1913 uint64_t value)
1914 {
1915 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1916 env->cp15.c14_pmevtyper[counter] = value;
1917
1918 /*
1919 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1920 * pmu_op_finish calls when loading saved state for a migration. Because
1921 * we're potentially updating the type of event here, the value written to
1922 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1923 * different counter type. Therefore, we need to set this value to the
1924 * current count for the counter type we're writing so that pmu_op_finish
1925 * has the correct count for its calculation.
1926 */
1927 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1928 if (event_supported(event)) {
1929 uint16_t event_idx = supported_event_map[event];
1930 env->cp15.c14_pmevcntr_delta[counter] =
1931 pm_events[event_idx].get_count(env);
1932 }
1933 }
1934
1935 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1936 {
1937 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1938 return pmevtyper_read(env, ri, counter);
1939 }
1940
1941 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1942 uint64_t value)
1943 {
1944 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1945 }
1946
1947 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1948 {
1949 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1950 }
1951
1952 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1953 uint64_t value, uint8_t counter)
1954 {
1955 if (counter < pmu_num_counters(env)) {
1956 pmevcntr_op_start(env, counter);
1957 env->cp15.c14_pmevcntr[counter] = value;
1958 pmevcntr_op_finish(env, counter);
1959 }
1960 /*
1961 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1962 * are CONSTRAINED UNPREDICTABLE.
1963 */
1964 }
1965
1966 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1967 uint8_t counter)
1968 {
1969 if (counter < pmu_num_counters(env)) {
1970 uint64_t ret;
1971 pmevcntr_op_start(env, counter);
1972 ret = env->cp15.c14_pmevcntr[counter];
1973 pmevcntr_op_finish(env, counter);
1974 return ret;
1975 } else {
1976 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1977 * are CONSTRAINED UNPREDICTABLE. */
1978 return 0;
1979 }
1980 }
1981
1982 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1983 uint64_t value)
1984 {
1985 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1986 pmevcntr_write(env, ri, value, counter);
1987 }
1988
1989 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1990 {
1991 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1992 return pmevcntr_read(env, ri, counter);
1993 }
1994
1995 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1996 uint64_t value)
1997 {
1998 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1999 assert(counter < pmu_num_counters(env));
2000 env->cp15.c14_pmevcntr[counter] = value;
2001 pmevcntr_write(env, ri, value, counter);
2002 }
2003
2004 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
2005 {
2006 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
2007 assert(counter < pmu_num_counters(env));
2008 return env->cp15.c14_pmevcntr[counter];
2009 }
2010
2011 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2012 uint64_t value)
2013 {
2014 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
2015 }
2016
2017 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2018 {
2019 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
2020 }
2021
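/*
 * PMUSERENR: in v8 the EN, SW, CR and ER bits (bits [3:0]) are defined;
 * earlier architecture versions implement only the EN bit.
 */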
2022 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2023 uint64_t value)
2024 {
2025 if (arm_feature(env, ARM_FEATURE_V8)) {
2026 env->cp15.c9_pmuserenr = value & 0xf;
2027 } else {
2028 env->cp15.c9_pmuserenr = value & 1;
2029 }
2030 }
2031
2032 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
2033 uint64_t value)
2034 {
2035 /* Only bits corresponding to implemented counters (and the C bit) can change */
2036 value &= pmu_counter_mask(env);
2037 env->cp15.c9_pminten |= value;
2038 pmu_update_irq(env);
2039 }
2040
2041 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2042 uint64_t value)
2043 {
2044 value &= pmu_counter_mask(env);
2045 env->cp15.c9_pminten &= ~value;
2046 pmu_update_irq(env);
2047 }
2048
2049 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2050 uint64_t value)
2051 {
2052 /* Note that even though the AArch64 view of this register has bits
2053 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
2054 * architectural requirements for bits which are RES0 only in some
2055 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
2056 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
2057 */
2058 raw_write(env, ri, value & ~0x1FULL);
2059 }
2060
2061 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2062 {
2063 /* Begin with base v8.0 state. */
2064 uint32_t valid_mask = 0x3fff;
2065 ARMCPU *cpu = env_archcpu(env);
2066
2067 if (arm_el_is_aa64(env, 3)) {
2068 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
2069 valid_mask &= ~SCR_NET;
2070 } else {
2071 valid_mask &= ~(SCR_RW | SCR_ST);
2072 }
2073
2074 if (!arm_feature(env, ARM_FEATURE_EL2)) {
2075 valid_mask &= ~SCR_HCE;
2076
2077 /* On ARMv7, SMD (or SCD as it is called in v7) is only
2078 * supported if EL2 exists. The bit is UNK/SBZP when
2079 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
2080 * when EL2 is unavailable.
2081 * On ARMv8, this bit is always available.
2082 */
2083 if (arm_feature(env, ARM_FEATURE_V7) &&
2084 !arm_feature(env, ARM_FEATURE_V8)) {
2085 valid_mask &= ~SCR_SMD;
2086 }
2087 }
2088 if (cpu_isar_feature(aa64_lor, cpu)) {
2089 valid_mask |= SCR_TLOR;
2090 }
2091 if (cpu_isar_feature(aa64_pauth, cpu)) {
2092 valid_mask |= SCR_API | SCR_APK;
2093 }
2094
2095 /* Clear all-context RES0 bits. */
2096 value &= valid_mask;
2097 raw_write(env, ri, value);
2098 }
2099
2100 static CPAccessResult access_aa64_tid2(CPUARMState *env,
2101 const ARMCPRegInfo *ri,
2102 bool isread)
2103 {
2104 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
2105 return CP_ACCESS_TRAP_EL2;
2106 }
2107
2108 return CP_ACCESS_OK;
2109 }
2110
2111 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2112 {
2113 ARMCPU *cpu = env_archcpu(env);
2114
2115 /* Acquire the CSSELR index from the bank matching the security state
2116 * of the CCSIDR access.
2117 */
2118 uint32_t index = A32_BANKED_REG_GET(env, csselr,
2119 ri->secure & ARM_CP_SECSTATE_S);
2120
2121 return cpu->ccsidr[index];
2122 }
2123
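/*
 * Only CSSELR bits [3:0] (Level and InD) are kept; writes to the other
 * bits are ignored.
 */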
2124 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2125 uint64_t value)
2126 {
2127 raw_write(env, ri, value & 0xf);
2128 }
2129
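/*
 * ISR_EL1/ISR: report the pending state of IRQ and FIQ. When running at
 * EL1 with virtual interrupts in effect and HCR_EL2.IMO/FMO set, report
 * the virtual interrupt lines instead of the physical ones.
 */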
2130 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2131 {
2132 CPUState *cs = env_cpu(env);
2133 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
2134 uint64_t ret = 0;
2135 bool allow_virt = (arm_current_el(env) == 1 &&
2136 (!arm_is_secure_below_el3(env) ||
2137 (env->cp15.scr_el3 & SCR_EEL2)));
2138
2139 if (allow_virt && (hcr_el2 & HCR_IMO)) {
2140 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2141 ret |= CPSR_I;
2142 }
2143 } else {
2144 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2145 ret |= CPSR_I;
2146 }
2147 }
2148
2149 if (allow_virt && (hcr_el2 & HCR_FMO)) {
2150 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2151 ret |= CPSR_F;
2152 }
2153 } else {
2154 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2155 ret |= CPSR_F;
2156 }
2157 }
2158
2159 /* External aborts are not possible in QEMU so A bit is always clear */
2160 return ret;
2161 }
2162
2163 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2164 bool isread)
2165 {
2166 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2167 return CP_ACCESS_TRAP_EL2;
2168 }
2169
2170 return CP_ACCESS_OK;
2171 }
2172
2173 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2174 bool isread)
2175 {
2176 if (arm_feature(env, ARM_FEATURE_V8)) {
2177 return access_aa64_tid1(env, ri, isread);
2178 }
2179
2180 return CP_ACCESS_OK;
2181 }
2182
2183 static const ARMCPRegInfo v7_cp_reginfo[] = {
2184 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2185 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2186 .access = PL1_W, .type = ARM_CP_NOP },
2187 /* Performance monitors are implementation defined in v7,
2188 * but with an ARM recommended set of registers, which we
2189 * follow.
2190 *
2191 * Performance registers fall into three categories:
2192 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2193 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2194 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2195 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2196 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2197 */
2198 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2199 .access = PL0_RW, .type = ARM_CP_ALIAS,
2200 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2201 .writefn = pmcntenset_write,
2202 .accessfn = pmreg_access,
2203 .raw_writefn = raw_write },
2204 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2205 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2206 .access = PL0_RW, .accessfn = pmreg_access,
2207 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2208 .writefn = pmcntenset_write, .raw_writefn = raw_write },
2209 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2210 .access = PL0_RW,
2211 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2212 .accessfn = pmreg_access,
2213 .writefn = pmcntenclr_write,
2214 .type = ARM_CP_ALIAS },
2215 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2216 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2217 .access = PL0_RW, .accessfn = pmreg_access,
2218 .type = ARM_CP_ALIAS,
2219 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2220 .writefn = pmcntenclr_write },
2221 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2222 .access = PL0_RW, .type = ARM_CP_IO,
2223 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2224 .accessfn = pmreg_access,
2225 .writefn = pmovsr_write,
2226 .raw_writefn = raw_write },
2227 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2228 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2229 .access = PL0_RW, .accessfn = pmreg_access,
2230 .type = ARM_CP_ALIAS | ARM_CP_IO,
2231 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2232 .writefn = pmovsr_write,
2233 .raw_writefn = raw_write },
2234 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2235 .access = PL0_W, .accessfn = pmreg_access_swinc,
2236 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2237 .writefn = pmswinc_write },
2238 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2239 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2240 .access = PL0_W, .accessfn = pmreg_access_swinc,
2241 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2242 .writefn = pmswinc_write },
2243 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2244 .access = PL0_RW, .type = ARM_CP_ALIAS,
2245 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2246 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2247 .raw_writefn = raw_write},
2248 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2249 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2250 .access = PL0_RW, .accessfn = pmreg_access_selr,
2251 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2252 .writefn = pmselr_write, .raw_writefn = raw_write, },
2253 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2254 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2255 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2256 .accessfn = pmreg_access_ccntr },
2257 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2258 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2259 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2260 .type = ARM_CP_IO,
2261 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2262 .readfn = pmccntr_read, .writefn = pmccntr_write,
2263 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2264 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2265 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2266 .access = PL0_RW, .accessfn = pmreg_access,
2267 .type = ARM_CP_ALIAS | ARM_CP_IO,
2268 .resetvalue = 0, },
2269 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2270 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2271 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2272 .access = PL0_RW, .accessfn = pmreg_access,
2273 .type = ARM_CP_IO,
2274 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2275 .resetvalue = 0, },
2276 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2277 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2278 .accessfn = pmreg_access,
2279 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2280 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2281 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2282 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2283 .accessfn = pmreg_access,
2284 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2285 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2286 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2287 .accessfn = pmreg_access_xevcntr,
2288 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2289 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2290 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2291 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2292 .accessfn = pmreg_access_xevcntr,
2293 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2294 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2295 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2296 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2297 .resetvalue = 0,
2298 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2299 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2300 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2301 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2302 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2303 .resetvalue = 0,
2304 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2305 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2306 .access = PL1_RW, .accessfn = access_tpm,
2307 .type = ARM_CP_ALIAS | ARM_CP_IO,
2308 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2309 .resetvalue = 0,
2310 .writefn = pmintenset_write, .raw_writefn = raw_write },
2311 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2312 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2313 .access = PL1_RW, .accessfn = access_tpm,
2314 .type = ARM_CP_IO,
2315 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2316 .writefn = pmintenset_write, .raw_writefn = raw_write,
2317 .resetvalue = 0x0 },
2318 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2319 .access = PL1_RW, .accessfn = access_tpm,
2320 .type = ARM_CP_ALIAS | ARM_CP_IO,
2321 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2322 .writefn = pmintenclr_write, },
2323 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2324 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2325 .access = PL1_RW, .accessfn = access_tpm,
2326 .type = ARM_CP_ALIAS | ARM_CP_IO,
2327 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2328 .writefn = pmintenclr_write },
2329 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2330 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2331 .access = PL1_R,
2332 .accessfn = access_aa64_tid2,
2333 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2334 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2335 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2336 .access = PL1_RW,
2337 .accessfn = access_aa64_tid2,
2338 .writefn = csselr_write, .resetvalue = 0,
2339 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2340 offsetof(CPUARMState, cp15.csselr_ns) } },
2341 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2342 * just RAZ for all cores:
2343 */
2344 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2345 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2346 .access = PL1_R, .type = ARM_CP_CONST,
2347 .accessfn = access_aa64_tid1,
2348 .resetvalue = 0 },
2349 /* Auxiliary fault status registers: these also are IMPDEF, and we
2350 * choose to RAZ/WI for all cores.
2351 */
2352 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2353 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2354 .access = PL1_RW, .accessfn = access_tvm_trvm,
2355 .type = ARM_CP_CONST, .resetvalue = 0 },
2356 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2357 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2358 .access = PL1_RW, .accessfn = access_tvm_trvm,
2359 .type = ARM_CP_CONST, .resetvalue = 0 },
2360 /* MAIR can just read-as-written because we don't implement caches
2361 * and so don't need to care about memory attributes.
2362 */
2363 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2364 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2365 .access = PL1_RW, .accessfn = access_tvm_trvm,
2366 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2367 .resetvalue = 0 },
2368 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2369 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2370 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2371 .resetvalue = 0 },
2372 /* For non-long-descriptor page tables these are PRRR and NMRR;
2373 * regardless they still act as reads-as-written for QEMU.
2374 */
2375 /* MAIR0/1 are defined separately from their 64-bit counterpart which
2376 * allows them to assign the correct fieldoffset based on the endianness
2377 * handled in the field definitions.
2378 */
2379 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2380 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2381 .access = PL1_RW, .accessfn = access_tvm_trvm,
2382 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2383 offsetof(CPUARMState, cp15.mair0_ns) },
2384 .resetfn = arm_cp_reset_ignore },
2385 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2386 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2387 .access = PL1_RW, .accessfn = access_tvm_trvm,
2388 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2389 offsetof(CPUARMState, cp15.mair1_ns) },
2390 .resetfn = arm_cp_reset_ignore },
2391 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2392 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2393 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2394 /* 32 bit ITLB invalidates */
2395 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2396 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2397 .writefn = tlbiall_write },
2398 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2399 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2400 .writefn = tlbimva_write },
2401 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2402 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2403 .writefn = tlbiasid_write },
2404 /* 32 bit DTLB invalidates */
2405 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2406 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2407 .writefn = tlbiall_write },
2408 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2409 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2410 .writefn = tlbimva_write },
2411 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2412 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2413 .writefn = tlbiasid_write },
2414 /* 32 bit TLB invalidates */
2415 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2416 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2417 .writefn = tlbiall_write },
2418 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2419 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2420 .writefn = tlbimva_write },
2421 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2422 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2423 .writefn = tlbiasid_write },
2424 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2425 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2426 .writefn = tlbimvaa_write },
2427 REGINFO_SENTINEL
2428 };
2429
2430 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2431 /* 32 bit TLB invalidates, Inner Shareable */
2432 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2433 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2434 .writefn = tlbiall_is_write },
2435 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2436 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2437 .writefn = tlbimva_is_write },
2438 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2439 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2440 .writefn = tlbiasid_is_write },
2441 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2442 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2443 .writefn = tlbimvaa_is_write },
2444 REGINFO_SENTINEL
2445 };
2446
2447 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2448 /* PMOVSSET is not implemented in v7 before v7ve */
2449 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2450 .access = PL0_RW, .accessfn = pmreg_access,
2451 .type = ARM_CP_ALIAS | ARM_CP_IO,
2452 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2453 .writefn = pmovsset_write,
2454 .raw_writefn = raw_write },
2455 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2456 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2457 .access = PL0_RW, .accessfn = pmreg_access,
2458 .type = ARM_CP_ALIAS | ARM_CP_IO,
2459 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2460 .writefn = pmovsset_write,
2461 .raw_writefn = raw_write },
2462 REGINFO_SENTINEL
2463 };
2464
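/*
 * Only TEECR.XED (bit 0) is writable; when it is set, EL0 accesses to
 * TEEHBR trap (see teehbr_access below).
 */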
2465 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2466 uint64_t value)
2467 {
2468 value &= 1;
2469 env->teecr = value;
2470 }
2471
2472 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2473 bool isread)
2474 {
2475 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2476 return CP_ACCESS_TRAP;
2477 }
2478 return CP_ACCESS_OK;
2479 }
2480
2481 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2482 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2483 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2484 .resetvalue = 0,
2485 .writefn = teecr_write },
2486 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2487 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2488 .accessfn = teehbr_access, .resetvalue = 0 },
2489 REGINFO_SENTINEL
2490 };
2491
2492 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2493 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2494 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2495 .access = PL0_RW,
2496 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2497 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2498 .access = PL0_RW,
2499 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2500 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2501 .resetfn = arm_cp_reset_ignore },
2502 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2503 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2504 .access = PL0_R|PL1_W,
2505 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2506 .resetvalue = 0},
2507 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2508 .access = PL0_R|PL1_W,
2509 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2510 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2511 .resetfn = arm_cp_reset_ignore },
2512 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2513 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2514 .access = PL1_RW,
2515 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2516 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2517 .access = PL1_RW,
2518 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2519 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2520 .resetvalue = 0 },
2521 REGINFO_SENTINEL
2522 };
2523
2524 #ifndef CONFIG_USER_ONLY
2525
2526 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2527 bool isread)
2528 {
2529 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2530 * Writable only at the highest implemented exception level.
2531 */
2532 int el = arm_current_el(env);
2533 uint64_t hcr;
2534 uint32_t cntkctl;
2535
2536 switch (el) {
2537 case 0:
2538 hcr = arm_hcr_el2_eff(env);
2539 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2540 cntkctl = env->cp15.cnthctl_el2;
2541 } else {
2542 cntkctl = env->cp15.c14_cntkctl;
2543 }
2544 if (!extract32(cntkctl, 0, 2)) {
2545 return CP_ACCESS_TRAP;
2546 }
2547 break;
2548 case 1:
2549 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2550 arm_is_secure_below_el3(env)) {
2551 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2552 return CP_ACCESS_TRAP_UNCATEGORIZED;
2553 }
2554 break;
2555 case 2:
2556 case 3:
2557 break;
2558 }
2559
2560 if (!isread && el < arm_highest_el(env)) {
2561 return CP_ACCESS_TRAP_UNCATEGORIZED;
2562 }
2563
2564 return CP_ACCESS_OK;
2565 }
2566
2567 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2568 bool isread)
2569 {
2570 unsigned int cur_el = arm_current_el(env);
2571 bool secure = arm_is_secure(env);
2572 uint64_t hcr = arm_hcr_el2_eff(env);
2573
2574 switch (cur_el) {
2575 case 0:
2576 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2577 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2578 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2579 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2580 }
2581
2582 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2583 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2584 return CP_ACCESS_TRAP;
2585 }
2586
2587 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2588 if (hcr & HCR_E2H) {
2589 if (timeridx == GTIMER_PHYS &&
2590 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2591 return CP_ACCESS_TRAP_EL2;
2592 }
2593 } else {
2594 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2595 if (arm_feature(env, ARM_FEATURE_EL2) &&
2596 timeridx == GTIMER_PHYS && !secure &&
2597 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2598 return CP_ACCESS_TRAP_EL2;
2599 }
2600 }
2601 break;
2602
2603 case 1:
2604 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2605 if (arm_feature(env, ARM_FEATURE_EL2) &&
2606 timeridx == GTIMER_PHYS && !secure &&
2607 (hcr & HCR_E2H
2608 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2609 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2610 return CP_ACCESS_TRAP_EL2;
2611 }
2612 break;
2613 }
2614 return CP_ACCESS_OK;
2615 }
2616
2617 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2618 bool isread)
2619 {
2620 unsigned int cur_el = arm_current_el(env);
2621 bool secure = arm_is_secure(env);
2622 uint64_t hcr = arm_hcr_el2_eff(env);
2623
2624 switch (cur_el) {
2625 case 0:
2626 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2627 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2628 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2629 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2630 }
2631
2632 /*
2633 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2634 * EL0 if EL0[PV]TEN is zero.
2635 */
2636 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2637 return CP_ACCESS_TRAP;
2638 }
2639 /* fall through */
2640
2641 case 1:
2642 if (arm_feature(env, ARM_FEATURE_EL2) &&
2643 timeridx == GTIMER_PHYS && !secure) {
2644 if (hcr & HCR_E2H) {
2645 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2646 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2647 return CP_ACCESS_TRAP_EL2;
2648 }
2649 } else {
2650 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2651 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2652 return CP_ACCESS_TRAP_EL2;
2653 }
2654 }
2655 }
2656 break;
2657 }
2658 return CP_ACCESS_OK;
2659 }
2660
2661 static CPAccessResult gt_pct_access(CPUARMState *env,
2662 const ARMCPRegInfo *ri,
2663 bool isread)
2664 {
2665 return gt_counter_access(env, GTIMER_PHYS, isread);
2666 }
2667
2668 static CPAccessResult gt_vct_access(CPUARMState *env,
2669 const ARMCPRegInfo *ri,
2670 bool isread)
2671 {
2672 return gt_counter_access(env, GTIMER_VIRT, isread);
2673 }
2674
2675 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2676 bool isread)
2677 {
2678 return gt_timer_access(env, GTIMER_PHYS, isread);
2679 }
2680
2681 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2682 bool isread)
2683 {
2684 return gt_timer_access(env, GTIMER_VIRT, isread);
2685 }
2686
2687 static CPAccessResult gt_stimer_access(CPUARMState *env,
2688 const ARMCPRegInfo *ri,
2689 bool isread)
2690 {
2691 /* The AArch64 register view of the secure physical timer is
2692 * always accessible from EL3, and configurably accessible from
2693 * Secure EL1.
2694 */
2695 switch (arm_current_el(env)) {
2696 case 1:
2697 if (!arm_is_secure(env)) {
2698 return CP_ACCESS_TRAP;
2699 }
2700 if (!(env->cp15.scr_el3 & SCR_ST)) {
2701 return CP_ACCESS_TRAP_EL3;
2702 }
2703 return CP_ACCESS_OK;
2704 case 0:
2705 case 2:
2706 return CP_ACCESS_TRAP;
2707 case 3:
2708 return CP_ACCESS_OK;
2709 default:
2710 g_assert_not_reached();
2711 }
2712 }
2713
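/*
 * Return the current system counter value: the virtual clock in
 * nanoseconds divided by the counter period derived from CNTFRQ.
 */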
2714 static uint64_t gt_get_countervalue(CPUARMState *env)
2715 {
2716 ARMCPU *cpu = env_archcpu(env);
2717
2718 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2719 }
2720
2721 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2722 {
2723 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2724
2725 if (gt->ctl & 1) {
2726 /* Timer enabled: calculate and set current ISTATUS, irq, and
2727 * reset timer to when ISTATUS next has to change
2728 */
2729 uint64_t offset = timeridx == GTIMER_VIRT ?
2730 cpu->env.cp15.cntvoff_el2 : 0;
2731 uint64_t count = gt_get_countervalue(&cpu->env);
2732 /* Note that this must be unsigned 64 bit arithmetic: */
2733 int istatus = count - offset >= gt->cval;
2734 uint64_t nexttick;
2735 int irqstate;
2736
2737 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2738
2739 irqstate = (istatus && !(gt->ctl & 2));
2740 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2741
2742 if (istatus) {
2743 /* Next transition is when count rolls back over to zero */
2744 nexttick = UINT64_MAX;
2745 } else {
2746 /* Next transition is when we hit cval */
2747 nexttick = gt->cval + offset;
2748 }
2749 /* Note that the desired next expiry time might be beyond the
2750 * signed-64-bit range of a QEMUTimer -- in this case we just
2751 * set the timer for as far in the future as possible. When the
2752 * timer expires we will reset the timer for any remaining period.
2753 */
2754 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2755 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2756 } else {
2757 timer_mod(cpu->gt_timer[timeridx], nexttick);
2758 }
2759 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2760 } else {
2761 /* Timer disabled: ISTATUS and timer output always clear */
2762 gt->ctl &= ~4;
2763 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2764 timer_del(cpu->gt_timer[timeridx]);
2765 trace_arm_gt_recalc_disabled(timeridx);
2766 }
2767 }
2768
2769 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2770 int timeridx)
2771 {
2772 ARMCPU *cpu = env_archcpu(env);
2773
2774 timer_del(cpu->gt_timer[timeridx]);
2775 }
2776
2777 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2778 {
2779 return gt_get_countervalue(env);
2780 }
2781
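/*
 * Return the CNTVOFF_EL2 offset to apply to the virtual counter. The
 * offset does not apply when the access is from EL2 with HCR_EL2.E2H set,
 * or from EL0 when HCR_EL2.{E2H,TGE} are both set.
 */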
2782 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2783 {
2784 uint64_t hcr;
2785
2786 switch (arm_current_el(env)) {
2787 case 2:
2788 hcr = arm_hcr_el2_eff(env);
2789 if (hcr & HCR_E2H) {
2790 return 0;
2791 }
2792 break;
2793 case 0:
2794 hcr = arm_hcr_el2_eff(env);
2795 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2796 return 0;
2797 }
2798 break;
2799 }
2800
2801 return env->cp15.cntvoff_el2;
2802 }
2803
2804 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2805 {
2806 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2807 }
2808
2809 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2810 int timeridx,
2811 uint64_t value)
2812 {
2813 trace_arm_gt_cval_write(timeridx, value);
2814 env->cp15.c14_timer[timeridx].cval = value;
2815 gt_recalc_timer(env_archcpu(env), timeridx);
2816 }
2817
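/*
 * TVAL is a signed 32-bit downcounter view of the timer:
 * TVAL = CVAL - (counter - offset). Writing TVAL sets
 * CVAL = (counter - offset) + SignExtend(value).
 */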
2818 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2819 int timeridx)
2820 {
2821 uint64_t offset = 0;
2822
2823 switch (timeridx) {
2824 case GTIMER_VIRT:
2825 case GTIMER_HYPVIRT:
2826 offset = gt_virt_cnt_offset(env);
2827 break;
2828 }
2829
2830 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2831 (gt_get_countervalue(env) - offset));
2832 }
2833
2834 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2835 int timeridx,
2836 uint64_t value)
2837 {
2838 uint64_t offset = 0;
2839
2840 switch (timeridx) {
2841 case GTIMER_VIRT:
2842 case GTIMER_HYPVIRT:
2843 offset = gt_virt_cnt_offset(env);
2844 break;
2845 }
2846
2847 trace_arm_gt_tval_write(timeridx, value);
2848 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2849 sextract64(value, 0, 32);
2850 gt_recalc_timer(env_archcpu(env), timeridx);
2851 }
2852
2853 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2854 int timeridx,
2855 uint64_t value)
2856 {
2857 ARMCPU *cpu = env_archcpu(env);
2858 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2859
2860 trace_arm_gt_ctl_write(timeridx, value);
2861 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2862 if ((oldval ^ value) & 1) {
2863 /* Enable toggled */
2864 gt_recalc_timer(cpu, timeridx);
2865 } else if ((oldval ^ value) & 2) {
2866 /* IMASK toggled: don't need to recalculate,
2867 * just set the interrupt line based on ISTATUS
2868 */
2869 int irqstate = (oldval & 4) && !(value & 2);
2870
2871 trace_arm_gt_imask_toggle(timeridx, irqstate);
2872 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2873 }
2874 }
2875
2876 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2877 {
2878 gt_timer_reset(env, ri, GTIMER_PHYS);
2879 }
2880
2881 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2882 uint64_t value)
2883 {
2884 gt_cval_write(env, ri, GTIMER_PHYS, value);
2885 }
2886
2887 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2888 {
2889 return gt_tval_read(env, ri, GTIMER_PHYS);
2890 }
2891
2892 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2893 uint64_t value)
2894 {
2895 gt_tval_write(env, ri, GTIMER_PHYS, value);
2896 }
2897
2898 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2899 uint64_t value)
2900 {
2901 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2902 }
2903
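/*
 * With VHE (HCR_EL2.E2H) in an EL2&0 translation regime, accesses to the
 * EL1 physical and virtual timer registers are redirected to the EL2
 * timers; pick the timer index accordingly.
 */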
2904 static int gt_phys_redir_timeridx(CPUARMState *env)
2905 {
2906 switch (arm_mmu_idx(env)) {
2907 case ARMMMUIdx_E20_0:
2908 case ARMMMUIdx_E20_2:
2909 case ARMMMUIdx_E20_2_PAN:
2910 return GTIMER_HYP;
2911 default:
2912 return GTIMER_PHYS;
2913 }
2914 }
2915
2916 static int gt_virt_redir_timeridx(CPUARMState *env)
2917 {
2918 switch (arm_mmu_idx(env)) {
2919 case ARMMMUIdx_E20_0:
2920 case ARMMMUIdx_E20_2:
2921 case ARMMMUIdx_E20_2_PAN:
2922 return GTIMER_HYPVIRT;
2923 default:
2924 return GTIMER_VIRT;
2925 }
2926 }
2927
2928 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2929 const ARMCPRegInfo *ri)
2930 {
2931 int timeridx = gt_phys_redir_timeridx(env);
2932 return env->cp15.c14_timer[timeridx].cval;
2933 }
2934
2935 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2936 uint64_t value)
2937 {
2938 int timeridx = gt_phys_redir_timeridx(env);
2939 gt_cval_write(env, ri, timeridx, value);
2940 }
2941
2942 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2943 const ARMCPRegInfo *ri)
2944 {
2945 int timeridx = gt_phys_redir_timeridx(env);
2946 return gt_tval_read(env, ri, timeridx);
2947 }
2948
2949 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2950 uint64_t value)
2951 {
2952 int timeridx = gt_phys_redir_timeridx(env);
2953 gt_tval_write(env, ri, timeridx, value);
2954 }
2955
2956 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2957 const ARMCPRegInfo *ri)
2958 {
2959 int timeridx = gt_phys_redir_timeridx(env);
2960 return env->cp15.c14_timer[timeridx].ctl;
2961 }
2962
2963 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2964 uint64_t value)
2965 {
2966 int timeridx = gt_phys_redir_timeridx(env);
2967 gt_ctl_write(env, ri, timeridx, value);
2968 }
2969
2970 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2971 {
2972 gt_timer_reset(env, ri, GTIMER_VIRT);
2973 }
2974
2975 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2976 uint64_t value)
2977 {
2978 gt_cval_write(env, ri, GTIMER_VIRT, value);
2979 }
2980
2981 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2982 {
2983 return gt_tval_read(env, ri, GTIMER_VIRT);
2984 }
2985
2986 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2987 uint64_t value)
2988 {
2989 gt_tval_write(env, ri, GTIMER_VIRT, value);
2990 }
2991
2992 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2993 uint64_t value)
2994 {
2995 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2996 }
2997
2998 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2999 uint64_t value)
3000 {
3001 ARMCPU *cpu = env_archcpu(env);
3002
3003 trace_arm_gt_cntvoff_write(value);
3004 raw_write(env, ri, value);
3005 gt_recalc_timer(cpu, GTIMER_VIRT);
3006 }
3007
3008 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
3009 const ARMCPRegInfo *ri)
3010 {
3011 int timeridx = gt_virt_redir_timeridx(env);
3012 return env->cp15.c14_timer[timeridx].cval;
3013 }
3014
3015 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3016 uint64_t value)
3017 {
3018 int timeridx = gt_virt_redir_timeridx(env);
3019 gt_cval_write(env, ri, timeridx, value);
3020 }
3021
3022 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
3023 const ARMCPRegInfo *ri)
3024 {
3025 int timeridx = gt_virt_redir_timeridx(env);
3026 return gt_tval_read(env, ri, timeridx);
3027 }
3028
3029 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3030 uint64_t value)
3031 {
3032 int timeridx = gt_virt_redir_timeridx(env);
3033 gt_tval_write(env, ri, timeridx, value);
3034 }
3035
3036 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
3037 const ARMCPRegInfo *ri)
3038 {
3039 int timeridx = gt_virt_redir_timeridx(env);
3040 return env->cp15.c14_timer[timeridx].ctl;
3041 }
3042
3043 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3044 uint64_t value)
3045 {
3046 int timeridx = gt_virt_redir_timeridx(env);
3047 gt_ctl_write(env, ri, timeridx, value);
3048 }
3049
3050 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3051 {
3052 gt_timer_reset(env, ri, GTIMER_HYP);
3053 }
3054
3055 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3056 uint64_t value)
3057 {
3058 gt_cval_write(env, ri, GTIMER_HYP, value);
3059 }
3060
3061 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3062 {
3063 return gt_tval_read(env, ri, GTIMER_HYP);
3064 }
3065
3066 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3067 uint64_t value)
3068 {
3069 gt_tval_write(env, ri, GTIMER_HYP, value);
3070 }
3071
3072 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3073 uint64_t value)
3074 {
3075 gt_ctl_write(env, ri, GTIMER_HYP, value);
3076 }
3077
3078 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3079 {
3080 gt_timer_reset(env, ri, GTIMER_SEC);
3081 }
3082
3083 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3084 uint64_t value)
3085 {
3086 gt_cval_write(env, ri, GTIMER_SEC, value);
3087 }
3088
3089 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3090 {
3091 return gt_tval_read(env, ri, GTIMER_SEC);
3092 }
3093
3094 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3095 uint64_t value)
3096 {
3097 gt_tval_write(env, ri, GTIMER_SEC, value);
3098 }
3099
3100 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3101 uint64_t value)
3102 {
3103 gt_ctl_write(env, ri, GTIMER_SEC, value);
3104 }
3105
3106 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3107 {
3108 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3109 }
3110
3111 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3112 uint64_t value)
3113 {
3114 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3115 }
3116
3117 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3118 {
3119 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3120 }
3121
3122 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3123 uint64_t value)
3124 {
3125 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3126 }
3127
3128 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3129 uint64_t value)
3130 {
3131 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3132 }
3133
3134 void arm_gt_ptimer_cb(void *opaque)
3135 {
3136 ARMCPU *cpu = opaque;
3137
3138 gt_recalc_timer(cpu, GTIMER_PHYS);