target/arm: Ignore HCR_EL2.ATA when {E2H,TGE} != 11
target/arm/helper.c
1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "target/arm/idau.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/gdbstub.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/host-utils.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "hw/semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/cpu-timers.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/tcg.h"
30 #include "qemu/range.h"
31 #include "qapi/qapi-commands-machine-target.h"
32 #include "qapi/error.h"
33 #include "qemu/guest-random.h"
34 #ifdef CONFIG_TCG
35 #include "arm_ldst.h"
36 #include "exec/cpu_ldst.h"
37 #endif
38
39 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
40
41 #ifndef CONFIG_USER_ONLY
42
43 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
44 MMUAccessType access_type, ARMMMUIdx mmu_idx,
45 bool s1_is_el0,
46 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
47 target_ulong *page_size_ptr,
48 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
49 __attribute__((nonnull));
50 #endif
51
52 static void switch_mode(CPUARMState *env, int mode);
53 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
54
55 static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
56 {
57 ARMCPU *cpu = env_archcpu(env);
58 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
59
60 /* VFP data registers are always little-endian. */
61 if (reg < nregs) {
62 return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
63 }
64 if (arm_feature(env, ARM_FEATURE_NEON)) {
65 /* Aliases for Q regs. */
66 nregs += 16;
67 if (reg < nregs) {
68 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
69 return gdb_get_reg128(buf, q[0], q[1]);
70 }
71 }
72 switch (reg - nregs) {
73 case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
74 case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
75 case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
76 }
77 return 0;
78 }
79
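/* Write a VFP/Neon register from the gdb buffer. Returns the number of
 * bytes consumed, or 0 if the register number is not handled here.
 */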
80 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
81 {
82 ARMCPU *cpu = env_archcpu(env);
83 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
84
85 if (reg < nregs) {
86 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
87 return 8;
88 }
89 if (arm_feature(env, ARM_FEATURE_NEON)) {
90 nregs += 16;
91 if (reg < nregs) {
92 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
93 q[0] = ldq_le_p(buf);
94 q[1] = ldq_le_p(buf + 8);
95 return 16;
96 }
97 }
98 switch (reg - nregs) {
99 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
100 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
101 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
102 }
103 return 0;
104 }
105
106 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
107 {
108 switch (reg) {
109 case 0 ... 31:
110 {
111 /* 128 bit FP register - quads are in LE order */
112 uint64_t *q = aa64_vfp_qreg(env, reg);
113 return gdb_get_reg128(buf, q[1], q[0]);
114 }
115 case 32:
116 /* FPSR */
117 return gdb_get_reg32(buf, vfp_get_fpsr(env));
118 case 33:
119 /* FPCR */
120 return gdb_get_reg32(buf, vfp_get_fpcr(env));
121 default:
122 return 0;
123 }
124 }
125
126 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
127 {
128 switch (reg) {
129 case 0 ... 31:
130 /* 128 bit FP register */
131 {
132 uint64_t *q = aa64_vfp_qreg(env, reg);
133 q[0] = ldq_le_p(buf);
134 q[1] = ldq_le_p(buf + 8);
135 return 16;
136 }
137 case 32:
138 /* FPSR */
139 vfp_set_fpsr(env, ldl_p(buf));
140 return 4;
141 case 33:
142 /* FPCR */
143 vfp_set_fpcr(env, ldl_p(buf));
144 return 4;
145 default:
146 return 0;
147 }
148 }
149
150 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
151 {
152 assert(ri->fieldoffset);
153 if (cpreg_field_is_64bit(ri)) {
154 return CPREG_FIELD64(env, ri);
155 } else {
156 return CPREG_FIELD32(env, ri);
157 }
158 }
159
160 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
161 uint64_t value)
162 {
163 assert(ri->fieldoffset);
164 if (cpreg_field_is_64bit(ri)) {
165 CPREG_FIELD64(env, ri) = value;
166 } else {
167 CPREG_FIELD32(env, ri) = value;
168 }
169 }
170
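/* Return a pointer to the CPUARMState field backing this register. */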
171 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
172 {
173 return (char *)env + ri->fieldoffset;
174 }
175
176 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
177 {
178 /* Raw read of a coprocessor register (as needed for migration, etc). */
179 if (ri->type & ARM_CP_CONST) {
180 return ri->resetvalue;
181 } else if (ri->raw_readfn) {
182 return ri->raw_readfn(env, ri);
183 } else if (ri->readfn) {
184 return ri->readfn(env, ri);
185 } else {
186 return raw_read(env, ri);
187 }
188 }
189
190 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
191 uint64_t v)
192 {
193 /* Raw write of a coprocessor register (as needed for migration, etc).
194 * Note that constant registers are treated as write-ignored; the
195 * caller should check for success by whether a readback gives the
196 * value written.
197 */
198 if (ri->type & ARM_CP_CONST) {
199 return;
200 } else if (ri->raw_writefn) {
201 ri->raw_writefn(env, ri, v);
202 } else if (ri->writefn) {
203 ri->writefn(env, ri, v);
204 } else {
205 raw_write(env, ri, v);
206 }
207 }
208
209 /**
210 * arm_get/set_gdb_*: get/set a gdb register
211 * @env: the CPU state
212 * @buf: a buffer to copy to/from
213 * @reg: register number (offset from start of group)
214 *
215 * We return the number of bytes copied
216 */
217
218 static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
219 {
220 ARMCPU *cpu = env_archcpu(env);
221 const ARMCPRegInfo *ri;
222 uint32_t key;
223
224 key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
225 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
226 if (ri) {
227 if (cpreg_field_is_64bit(ri)) {
228 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
229 } else {
230 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
231 }
232 }
233 return 0;
234 }
235
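/* Writes to system registers via the gdbstub are not supported, so
 * report zero bytes consumed.
 */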
236 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
237 {
238 return 0;
239 }
240
241 #ifdef TARGET_AARCH64
242 static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
243 {
244 ARMCPU *cpu = env_archcpu(env);
245
246 switch (reg) {
247 /* The first 32 registers are the zregs */
248 case 0 ... 31:
249 {
250 int vq, len = 0;
251 for (vq = 0; vq < cpu->sve_max_vq; vq++) {
252 len += gdb_get_reg128(buf,
253 env->vfp.zregs[reg].d[vq * 2 + 1],
254 env->vfp.zregs[reg].d[vq * 2]);
255 }
256 return len;
257 }
258 case 32:
259 return gdb_get_reg32(buf, vfp_get_fpsr(env));
260 case 33:
261 return gdb_get_reg32(buf, vfp_get_fpcr(env));
262 /* then 16 predicates and the ffr */
263 case 34 ... 50:
264 {
265 int preg = reg - 34;
266 int vq, len = 0;
267 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
268 len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
269 }
270 return len;
271 }
272 case 51:
273 {
274 /*
275 * We report in Vector Granules (VG) which is 64bit in a Z reg
276 * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
277 */
278 int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
279 return gdb_get_reg32(buf, vq * 2);
280 }
281 default:
282 /* gdbstub asked for something out of our range */
283 qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
284 break;
285 }
286
287 return 0;
288 }
289
290 static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
291 {
292 ARMCPU *cpu = env_archcpu(env);
293
295 switch (reg) {
296 /* The first 32 registers are the zregs */
297 case 0 ... 31:
298 {
299 int vq, len = 0;
300 uint64_t *p = (uint64_t *) buf;
301 for (vq = 0; vq < cpu->sve_max_vq; vq++) {
302 env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
303 env->vfp.zregs[reg].d[vq * 2] = *p++;
304 len += 16;
305 }
306 return len;
307 }
308 case 32:
309 vfp_set_fpsr(env, *(uint32_t *)buf);
310 return 4;
311 case 33:
312 vfp_set_fpcr(env, *(uint32_t *)buf);
313 return 4;
314 case 34 ... 50:
315 {
316 int preg = reg - 34;
317 int vq, len = 0;
318 uint64_t *p = (uint64_t *) buf;
319 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
320 env->vfp.pregs[preg].p[vq / 4] = *p++;
321 len += 8;
322 }
323 return len;
324 }
325 case 51:
326 /* cannot set vg via gdbstub */
327 return 0;
328 default:
329 /* gdbstub asked for something out of our range */
330 break;
331 }
332
333 return 0;
334 }
335 #endif /* TARGET_AARCH64 */
336
337 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
338 {
339 /* Return true if the regdef would cause an assertion if you called
340 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
341 * program bug for it not to have the NO_RAW flag).
342 * NB that returning false here doesn't necessarily mean that calling
343 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
344 * read/write access functions which are safe for raw use" from "has
345 * read/write access functions which have side effects but has forgotten
346 * to provide raw access functions".
347 * The tests here line up with the conditions in read/write_raw_cp_reg()
348 * and assertions in raw_read()/raw_write().
349 */
350 if ((ri->type & ARM_CP_CONST) ||
351 ri->fieldoffset ||
352 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
353 return false;
354 }
355 return true;
356 }
357
358 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
359 {
360 /* Write the coprocessor state from cpu->env to the (index,value) list. */
361 int i;
362 bool ok = true;
363
364 for (i = 0; i < cpu->cpreg_array_len; i++) {
365 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
366 const ARMCPRegInfo *ri;
367 uint64_t newval;
368
369 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
370 if (!ri) {
371 ok = false;
372 continue;
373 }
374 if (ri->type & ARM_CP_NO_RAW) {
375 continue;
376 }
377
378 newval = read_raw_cp_reg(&cpu->env, ri);
379 if (kvm_sync) {
380 /*
381 * Only sync if the previous list->cpustate sync succeeded.
382 * Rather than tracking the success/failure state for every
383 * item in the list, we just recheck "does the raw write we must
384 * have made in write_list_to_cpustate() read back OK" here.
385 */
386 uint64_t oldval = cpu->cpreg_values[i];
387
388 if (oldval == newval) {
389 continue;
390 }
391
392 write_raw_cp_reg(&cpu->env, ri, oldval);
393 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
394 continue;
395 }
396
397 write_raw_cp_reg(&cpu->env, ri, newval);
398 }
399 cpu->cpreg_values[i] = newval;
400 }
401 return ok;
402 }
403
404 bool write_list_to_cpustate(ARMCPU *cpu)
405 {
406 int i;
407 bool ok = true;
408
409 for (i = 0; i < cpu->cpreg_array_len; i++) {
410 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
411 uint64_t v = cpu->cpreg_values[i];
412 const ARMCPRegInfo *ri;
413
414 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
415 if (!ri) {
416 ok = false;
417 continue;
418 }
419 if (ri->type & ARM_CP_NO_RAW) {
420 continue;
421 }
422 /* Write value and confirm it reads back as written
423 * (to catch read-only registers and partially read-only
424 * registers where the incoming migration value doesn't match)
425 */
426 write_raw_cp_reg(&cpu->env, ri, v);
427 if (read_raw_cp_reg(&cpu->env, ri) != v) {
428 ok = false;
429 }
430 }
431 return ok;
432 }
433
434 static void add_cpreg_to_list(gpointer key, gpointer opaque)
435 {
436 ARMCPU *cpu = opaque;
437 uint64_t regidx;
438 const ARMCPRegInfo *ri;
439
440 regidx = *(uint32_t *)key;
441 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
442
443 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
444 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
445 /* The value array need not be initialized at this point */
446 cpu->cpreg_array_len++;
447 }
448 }
449
450 static void count_cpreg(gpointer key, gpointer opaque)
451 {
452 ARMCPU *cpu = opaque;
453 uint64_t regidx;
454 const ARMCPRegInfo *ri;
455
456 regidx = *(uint32_t *)key;
457 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
458
459 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
460 cpu->cpreg_array_len++;
461 }
462 }
463
464 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
465 {
466 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
467 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
468
469 if (aidx > bidx) {
470 return 1;
471 }
472 if (aidx < bidx) {
473 return -1;
474 }
475 return 0;
476 }
477
478 void init_cpreg_list(ARMCPU *cpu)
479 {
480 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
481 * Note that we require cpreg_tuples[] to be sorted by key ID.
482 */
483 GList *keys;
484 int arraylen;
485
486 keys = g_hash_table_get_keys(cpu->cp_regs);
487 keys = g_list_sort(keys, cpreg_key_compare);
488
489 cpu->cpreg_array_len = 0;
490
491 g_list_foreach(keys, count_cpreg, cpu);
492
493 arraylen = cpu->cpreg_array_len;
494 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
495 cpu->cpreg_values = g_new(uint64_t, arraylen);
496 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
497 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
498 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
499 cpu->cpreg_array_len = 0;
500
501 g_list_foreach(keys, add_cpreg_to_list, cpu);
502
503 assert(cpu->cpreg_array_len == arraylen);
504
505 g_list_free(keys);
506 }
507
508 /*
509 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
510 */
511 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
512 const ARMCPRegInfo *ri,
513 bool isread)
514 {
515 if (!is_a64(env) && arm_current_el(env) == 3 &&
516 arm_is_secure_below_el3(env)) {
517 return CP_ACCESS_TRAP_UNCATEGORIZED;
518 }
519 return CP_ACCESS_OK;
520 }
521
522 /* Some secure-only AArch32 registers trap to EL3 if used from
523 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
524 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
525 * We assume that the .access field is set to PL1_RW.
526 */
527 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
528 const ARMCPRegInfo *ri,
529 bool isread)
530 {
531 if (arm_current_el(env) == 3) {
532 return CP_ACCESS_OK;
533 }
534 if (arm_is_secure_below_el3(env)) {
535 return CP_ACCESS_TRAP_EL3;
536 }
537 /* This will be EL1 NS and EL2 NS, which just UNDEF */
538 return CP_ACCESS_TRAP_UNCATEGORIZED;
539 }
540
541 /* Check for traps to "powerdown debug" registers, which are controlled
542 * by MDCR.TDOSA
543 */
544 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
545 bool isread)
546 {
547 int el = arm_current_el(env);
548 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
549 (env->cp15.mdcr_el2 & MDCR_TDE) ||
550 (arm_hcr_el2_eff(env) & HCR_TGE);
551
552 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
553 return CP_ACCESS_TRAP_EL2;
554 }
555 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
556 return CP_ACCESS_TRAP_EL3;
557 }
558 return CP_ACCESS_OK;
559 }
560
561 /* Check for traps to "debug ROM" registers, which are controlled
562 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
563 */
564 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
565 bool isread)
566 {
567 int el = arm_current_el(env);
568 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
569 (env->cp15.mdcr_el2 & MDCR_TDE) ||
570 (arm_hcr_el2_eff(env) & HCR_TGE);
571
572 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
573 return CP_ACCESS_TRAP_EL2;
574 }
575 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
576 return CP_ACCESS_TRAP_EL3;
577 }
578 return CP_ACCESS_OK;
579 }
580
581 /* Check for traps to general debug registers, which are controlled
582 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
583 */
584 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
585 bool isread)
586 {
587 int el = arm_current_el(env);
588 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
589 (env->cp15.mdcr_el2 & MDCR_TDE) ||
590 (arm_hcr_el2_eff(env) & HCR_TGE);
591
592 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
593 return CP_ACCESS_TRAP_EL2;
594 }
595 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
596 return CP_ACCESS_TRAP_EL3;
597 }
598 return CP_ACCESS_OK;
599 }
600
601 /* Check for traps to performance monitor registers, which are controlled
602 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
603 */
604 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
605 bool isread)
606 {
607 int el = arm_current_el(env);
608
609 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
610 && !arm_is_secure_below_el3(env)) {
611 return CP_ACCESS_TRAP_EL2;
612 }
613 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
614 return CP_ACCESS_TRAP_EL3;
615 }
616 return CP_ACCESS_OK;
617 }
618
619 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
620 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
621 bool isread)
622 {
623 if (arm_current_el(env) == 1) {
624 uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
625 if (arm_hcr_el2_eff(env) & trap) {
626 return CP_ACCESS_TRAP_EL2;
627 }
628 }
629 return CP_ACCESS_OK;
630 }
631
632 /* Check for traps from EL1 due to HCR_EL2.TSW. */
633 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
634 bool isread)
635 {
636 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
637 return CP_ACCESS_TRAP_EL2;
638 }
639 return CP_ACCESS_OK;
640 }
641
642 /* Check for traps from EL1 due to HCR_EL2.TACR. */
643 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
644 bool isread)
645 {
646 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
647 return CP_ACCESS_TRAP_EL2;
648 }
649 return CP_ACCESS_OK;
650 }
651
652 /* Check for traps from EL1 due to HCR_EL2.TTLB. */
653 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
654 bool isread)
655 {
656 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
657 return CP_ACCESS_TRAP_EL2;
658 }
659 return CP_ACCESS_OK;
660 }
661
662 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
663 {
664 ARMCPU *cpu = env_archcpu(env);
665
666 raw_write(env, ri, value);
667 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
668 }
669
670 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
671 {
672 ARMCPU *cpu = env_archcpu(env);
673
674 if (raw_read(env, ri) != value) {
675 /* Unlike real hardware the qemu TLB uses virtual addresses,
676 * not modified virtual addresses, so this causes a TLB flush.
677 */
678 tlb_flush(CPU(cpu));
679 raw_write(env, ri, value);
680 }
681 }
682
683 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
684 uint64_t value)
685 {
686 ARMCPU *cpu = env_archcpu(env);
687
688 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
689 && !extended_addresses_enabled(env)) {
690 /* For VMSA (when not using the LPAE long descriptor page table
691 * format) this register includes the ASID, so do a TLB flush.
692 * For PMSA it is purely a process ID and no action is needed.
693 */
694 tlb_flush(CPU(cpu));
695 }
696 raw_write(env, ri, value);
697 }
698
699 /* IS variants of TLB operations must affect all cores */
700 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
701 uint64_t value)
702 {
703 CPUState *cs = env_cpu(env);
704
705 tlb_flush_all_cpus_synced(cs);
706 }
707
708 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
709 uint64_t value)
710 {
711 CPUState *cs = env_cpu(env);
712
713 tlb_flush_all_cpus_synced(cs);
714 }
715
716 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
717 uint64_t value)
718 {
719 CPUState *cs = env_cpu(env);
720
721 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
722 }
723
724 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
725 uint64_t value)
726 {
727 CPUState *cs = env_cpu(env);
728
729 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
730 }
731
732 /*
733 * Non-IS variants of TLB operations are upgraded to
734 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
735 * force broadcast of these operations.
736 */
737 static bool tlb_force_broadcast(CPUARMState *env)
738 {
739 return (env->cp15.hcr_el2 & HCR_FB) &&
740 arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
741 }
742
743 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
744 uint64_t value)
745 {
746 /* Invalidate all (TLBIALL) */
747 CPUState *cs = env_cpu(env);
748
749 if (tlb_force_broadcast(env)) {
750 tlb_flush_all_cpus_synced(cs);
751 } else {
752 tlb_flush(cs);
753 }
754 }
755
756 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
757 uint64_t value)
758 {
759 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
760 CPUState *cs = env_cpu(env);
761
762 value &= TARGET_PAGE_MASK;
763 if (tlb_force_broadcast(env)) {
764 tlb_flush_page_all_cpus_synced(cs, value);
765 } else {
766 tlb_flush_page(cs, value);
767 }
768 }
769
770 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
771 uint64_t value)
772 {
773 /* Invalidate by ASID (TLBIASID) */
774 CPUState *cs = env_cpu(env);
775
776 if (tlb_force_broadcast(env)) {
777 tlb_flush_all_cpus_synced(cs);
778 } else {
779 tlb_flush(cs);
780 }
781 }
782
783 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
784 uint64_t value)
785 {
786 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
787 CPUState *cs = env_cpu(env);
788
789 value &= TARGET_PAGE_MASK;
790 if (tlb_force_broadcast(env)) {
791 tlb_flush_page_all_cpus_synced(cs, value);
792 } else {
793 tlb_flush_page(cs, value);
794 }
795 }
796
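/* TLBIALLNSNH and its IS variant below flush the non-secure EL1&0
 * translation regime (the E10_1, E10_1_PAN and E10_0 mmu indexes).
 */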
797 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
798 uint64_t value)
799 {
800 CPUState *cs = env_cpu(env);
801
802 tlb_flush_by_mmuidx(cs,
803 ARMMMUIdxBit_E10_1 |
804 ARMMMUIdxBit_E10_1_PAN |
805 ARMMMUIdxBit_E10_0);
806 }
807
808 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
809 uint64_t value)
810 {
811 CPUState *cs = env_cpu(env);
812
813 tlb_flush_by_mmuidx_all_cpus_synced(cs,
814 ARMMMUIdxBit_E10_1 |
815 ARMMMUIdxBit_E10_1_PAN |
816 ARMMMUIdxBit_E10_0);
817 }
818
819
820 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
821 uint64_t value)
822 {
823 CPUState *cs = env_cpu(env);
824
825 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
826 }
827
828 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
829 uint64_t value)
830 {
831 CPUState *cs = env_cpu(env);
832
833 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
834 }
835
836 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
837 uint64_t value)
838 {
839 CPUState *cs = env_cpu(env);
840 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
841
842 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
843 }
844
845 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
846 uint64_t value)
847 {
848 CPUState *cs = env_cpu(env);
849 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
850
851 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
852 ARMMMUIdxBit_E2);
853 }
854
855 static const ARMCPRegInfo cp_reginfo[] = {
856 /* Define the secure and non-secure FCSE identifier CP registers
857 * separately because there is no secure bank in V8 (no _EL3). This allows
858 * the secure register to be properly reset and migrated. There is also no
859 * v8 EL1 version of the register so the non-secure instance stands alone.
860 */
861 { .name = "FCSEIDR",
862 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
863 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
864 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
865 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
866 { .name = "FCSEIDR_S",
867 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
868 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
869 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
870 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
871 /* Define the secure and non-secure context identifier CP registers
872 * separately because there is no secure bank in V8 (no _EL3). This allows
873 * the secure register to be properly reset and migrated. In the
874 * non-secure case, the 32-bit register will have reset and migration
875 * disabled during registration as it is handled by the 64-bit instance.
876 */
877 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
878 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
879 .access = PL1_RW, .accessfn = access_tvm_trvm,
880 .secure = ARM_CP_SECSTATE_NS,
881 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
882 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
883 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
884 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
885 .access = PL1_RW, .accessfn = access_tvm_trvm,
886 .secure = ARM_CP_SECSTATE_S,
887 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
888 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
889 REGINFO_SENTINEL
890 };
891
892 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
893 /* NB: Some of these registers exist in v8 but with more precise
894 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
895 */
896 /* MMU Domain access control / MPU write buffer control */
897 { .name = "DACR",
898 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
899 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
900 .writefn = dacr_write, .raw_writefn = raw_write,
901 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
902 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
903 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
904 * For v6 and v5, these mappings are overly broad.
905 */
906 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
907 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
908 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
909 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
910 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
911 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
912 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
913 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
914 /* Cache maintenance ops; some of this space may be overridden later. */
915 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
916 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
917 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
918 REGINFO_SENTINEL
919 };
920
921 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
922 /* Not all pre-v6 cores implemented this WFI, so this is slightly
923 * over-broad.
924 */
925 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
926 .access = PL1_W, .type = ARM_CP_WFI },
927 REGINFO_SENTINEL
928 };
929
930 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
931 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
932 * is UNPREDICTABLE; we choose to NOP as most implementations do).
933 */
934 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
935 .access = PL1_W, .type = ARM_CP_WFI },
936 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
937 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
938 * OMAPCP will override this space.
939 */
940 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
941 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
942 .resetvalue = 0 },
943 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
944 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
945 .resetvalue = 0 },
946 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
947 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
948 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
949 .resetvalue = 0 },
950 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
951 * implementing it as RAZ means the "debug architecture version" bits
952 * will read as a reserved value, which should cause Linux to not try
953 * to use the debug hardware.
954 */
955 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
956 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
957 /* MMU TLB control. Note that the wildcarding means we cover not just
958 * the unified TLB ops but also the dside/iside/inner-shareable variants.
959 */
960 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
961 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
962 .type = ARM_CP_NO_RAW },
963 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
964 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
965 .type = ARM_CP_NO_RAW },
966 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
967 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
968 .type = ARM_CP_NO_RAW },
969 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
970 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
971 .type = ARM_CP_NO_RAW },
972 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
973 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
974 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
975 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
976 REGINFO_SENTINEL
977 };
978
979 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
980 uint64_t value)
981 {
982 uint32_t mask = 0;
983
984 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
985 if (!arm_feature(env, ARM_FEATURE_V8)) {
986 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
987 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
988 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
989 */
990 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
991 /* VFP coprocessor: cp10 & cp11 [23:20] */
992 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
993
994 if (!arm_feature(env, ARM_FEATURE_NEON)) {
995 /* ASEDIS [31] bit is RAO/WI */
996 value |= (1 << 31);
997 }
998
999 /* VFPv3 and upwards with NEON implement 32 double precision
1000 * registers (D0-D31).
1001 */
1002 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
1003 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
1004 value |= (1 << 30);
1005 }
1006 }
1007 value &= mask;
1008 }
1009
1010 /*
1011 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1012 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1013 */
1014 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1015 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1016 value &= ~(0xf << 20);
1017 value |= env->cp15.cpacr_el1 & (0xf << 20);
1018 }
1019
1020 env->cp15.cpacr_el1 = value;
1021 }
1022
1023 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1024 {
1025 /*
1026 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1027 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1028 */
1029 uint64_t value = env->cp15.cpacr_el1;
1030
1031 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1032 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1033 value &= ~(0xf << 20);
1034 }
1035 return value;
1036 }
1037
1038
1039 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1040 {
1041 /* Call cpacr_write() so that we reset with the correct RAO bits set
1042 * for our CPU features.
1043 */
1044 cpacr_write(env, ri, 0);
1045 }
1046
1047 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1048 bool isread)
1049 {
1050 if (arm_feature(env, ARM_FEATURE_V8)) {
1051 /* Check if CPACR accesses are to be trapped to EL2 */
1052 if (arm_current_el(env) == 1 &&
1053 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
1054 return CP_ACCESS_TRAP_EL2;
1055 /* Check if CPACR accesses are to be trapped to EL3 */
1056 } else if (arm_current_el(env) < 3 &&
1057 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1058 return CP_ACCESS_TRAP_EL3;
1059 }
1060 }
1061
1062 return CP_ACCESS_OK;
1063 }
1064
1065 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1066 bool isread)
1067 {
1068 /* Check if CPTR accesses are set to trap to EL3 */
1069 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1070 return CP_ACCESS_TRAP_EL3;
1071 }
1072
1073 return CP_ACCESS_OK;
1074 }
1075
1076 static const ARMCPRegInfo v6_cp_reginfo[] = {
1077 /* prefetch by MVA in v6, NOP in v7 */
1078 { .name = "MVA_prefetch",
1079 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
1080 .access = PL1_W, .type = ARM_CP_NOP },
1081 /* We need to break the TB after ISB to execute self-modifying code
1082 * correctly and also to take any pending interrupts immediately.
1083 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
1084 */
1085 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
1086 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
1087 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
1088 .access = PL0_W, .type = ARM_CP_NOP },
1089 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
1090 .access = PL0_W, .type = ARM_CP_NOP },
1091 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
1092 .access = PL1_RW, .accessfn = access_tvm_trvm,
1093 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1094 offsetof(CPUARMState, cp15.ifar_ns) },
1095 .resetvalue = 0, },
1096 /* Watchpoint Fault Address Register : should actually only be present
1097 * for 1136, 1176, 11MPCore.
1098 */
1099 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1100 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1101 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1102 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1103 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1104 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1105 REGINFO_SENTINEL
1106 };
1107
1108 /* Definitions for the PMU registers */
1109 #define PMCRN_MASK 0xf800
1110 #define PMCRN_SHIFT 11
1111 #define PMCRLC 0x40
1112 #define PMCRDP 0x20
1113 #define PMCRX 0x10
1114 #define PMCRD 0x8
1115 #define PMCRC 0x4
1116 #define PMCRP 0x2
1117 #define PMCRE 0x1
1118 /*
1119 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
1120 * which can be written as 1 to trigger behaviour but which stay RAZ).
1121 */
1122 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1123
1124 #define PMXEVTYPER_P 0x80000000
1125 #define PMXEVTYPER_U 0x40000000
1126 #define PMXEVTYPER_NSK 0x20000000
1127 #define PMXEVTYPER_NSU 0x10000000
1128 #define PMXEVTYPER_NSH 0x08000000
1129 #define PMXEVTYPER_M 0x04000000
1130 #define PMXEVTYPER_MT 0x02000000
1131 #define PMXEVTYPER_EVTCOUNT 0x0000ffff
1132 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1133 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1134 PMXEVTYPER_M | PMXEVTYPER_MT | \
1135 PMXEVTYPER_EVTCOUNT)
1136
1137 #define PMCCFILTR 0xf8000000
1138 #define PMCCFILTR_M PMXEVTYPER_M
1139 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1140
1141 static inline uint32_t pmu_num_counters(CPUARMState *env)
1142 {
1143 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1144 }
1145
1146 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1147 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1148 {
1149 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1150 }
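/* For example, with PMCR.N == 4 pmu_counter_mask() is 0x8000000f:
 * bit 31 covers PMCCNTR and bits [3:0] cover the four event counters.
 */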
1151
1152 typedef struct pm_event {
1153 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1154 /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1155 bool (*supported)(CPUARMState *);
1156 /*
1157 * Retrieve the current count of the underlying event. The programmed
1158 * counters hold a difference from the return value from this function
1159 */
1160 uint64_t (*get_count)(CPUARMState *);
1161 /*
1162 * Return how many nanoseconds it will take (at a minimum) for count events
1163 * to occur. A negative value indicates the counter will never overflow, or
1164 * that the counter has otherwise arranged for the overflow bit to be set
1165 * and the PMU interrupt to be raised on overflow.
1166 */
1167 int64_t (*ns_per_count)(uint64_t);
1168 } pm_event;
1169
1170 static bool event_always_supported(CPUARMState *env)
1171 {
1172 return true;
1173 }
1174
1175 static uint64_t swinc_get_count(CPUARMState *env)
1176 {
1177 /*
1178 * SW_INCR events are written directly to the pmevcntr's by writes to
1179 * PMSWINC, so there is no underlying count maintained by the PMU itself
1180 */
1181 return 0;
1182 }
1183
1184 static int64_t swinc_ns_per(uint64_t ignored)
1185 {
1186 return -1;
1187 }
1188
1189 /*
1190 * Return the underlying cycle count for the PMU cycle counters. In
1191 * user-mode emulation, fall back to the host tick count.
1192 */
1193 static uint64_t cycles_get_count(CPUARMState *env)
1194 {
1195 #ifndef CONFIG_USER_ONLY
1196 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1197 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1198 #else
1199 return cpu_get_host_ticks();
1200 #endif
1201 }
1202
1203 #ifndef CONFIG_USER_ONLY
1204 static int64_t cycles_ns_per(uint64_t cycles)
1205 {
1206 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1207 }
1208
1209 static bool instructions_supported(CPUARMState *env)
1210 {
1211 return icount_enabled() == 1; /* Precise instruction counting */
1212 }
1213
1214 static uint64_t instructions_get_count(CPUARMState *env)
1215 {
1216 return (uint64_t)icount_get_raw();
1217 }
1218
1219 static int64_t instructions_ns_per(uint64_t icount)
1220 {
1221 return icount_to_ns((int64_t)icount);
1222 }
1223 #endif
1224
1225 static bool pmu_8_1_events_supported(CPUARMState *env)
1226 {
1227 /* For events which are supported in any v8.1 PMU */
1228 return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
1229 }
1230
1231 static bool pmu_8_4_events_supported(CPUARMState *env)
1232 {
1233 /* For events which are supported in any v8.4 PMU */
1234 return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
1235 }
1236
1237 static uint64_t zero_event_get_count(CPUARMState *env)
1238 {
1239 /* For events which never fire on QEMU, their count is always zero */
1240 return 0;
1241 }
1242
1243 static int64_t zero_event_ns_per(uint64_t cycles)
1244 {
1245 /* An event which never fires can never overflow */
1246 return -1;
1247 }
1248
1249 static const pm_event pm_events[] = {
1250 { .number = 0x000, /* SW_INCR */
1251 .supported = event_always_supported,
1252 .get_count = swinc_get_count,
1253 .ns_per_count = swinc_ns_per,
1254 },
1255 #ifndef CONFIG_USER_ONLY
1256 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1257 .supported = instructions_supported,
1258 .get_count = instructions_get_count,
1259 .ns_per_count = instructions_ns_per,
1260 },
1261 { .number = 0x011, /* CPU_CYCLES, Cycle */
1262 .supported = event_always_supported,
1263 .get_count = cycles_get_count,
1264 .ns_per_count = cycles_ns_per,
1265 },
1266 #endif
1267 { .number = 0x023, /* STALL_FRONTEND */
1268 .supported = pmu_8_1_events_supported,
1269 .get_count = zero_event_get_count,
1270 .ns_per_count = zero_event_ns_per,
1271 },
1272 { .number = 0x024, /* STALL_BACKEND */
1273 .supported = pmu_8_1_events_supported,
1274 .get_count = zero_event_get_count,
1275 .ns_per_count = zero_event_ns_per,
1276 },
1277 { .number = 0x03c, /* STALL */
1278 .supported = pmu_8_4_events_supported,
1279 .get_count = zero_event_get_count,
1280 .ns_per_count = zero_event_ns_per,
1281 },
1282 };
1283
1284 /*
1285 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1286 * events (i.e. the statistical profiling extension), this implementation
1287 * should first be updated to something sparse instead of the current
1288 * supported_event_map[] array.
1289 */
1290 #define MAX_EVENT_ID 0x3c
1291 #define UNSUPPORTED_EVENT UINT16_MAX
1292 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1293
1294 /*
1295 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1296 * of ARM event numbers to indices in our pm_events array.
1297 *
1298 * Note: Events in the 0x40XX range are not currently supported.
1299 */
1300 void pmu_init(ARMCPU *cpu)
1301 {
1302 unsigned int i;
1303
1304 /*
1305 * Empty supported_event_map and cpu->pmceid[01] before adding supported
1306 * events to them
1307 */
1308 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1309 supported_event_map[i] = UNSUPPORTED_EVENT;
1310 }
1311 cpu->pmceid0 = 0;
1312 cpu->pmceid1 = 0;
1313
1314 for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1315 const pm_event *cnt = &pm_events[i];
1316 assert(cnt->number <= MAX_EVENT_ID);
1317 /* We do not currently support events in the 0x40xx range */
1318 assert(cnt->number <= 0x3f);
1319
1320 if (cnt->supported(&cpu->env)) {
1321 supported_event_map[cnt->number] = i;
1322 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1323 if (cnt->number & 0x20) {
1324 cpu->pmceid1 |= event_mask;
1325 } else {
1326 cpu->pmceid0 |= event_mask;
1327 }
1328 }
1329 }
1330 }
1331
1332 /*
1333 * Check at runtime whether a PMU event is supported for the current machine
1334 */
1335 static bool event_supported(uint16_t number)
1336 {
1337 if (number > MAX_EVENT_ID) {
1338 return false;
1339 }
1340 return supported_event_map[number] != UNSUPPORTED_EVENT;
1341 }
1342
1343 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1344 bool isread)
1345 {
1346 /* Performance monitor registers user accessibility is controlled
1347 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1348 * trapping to EL2 or EL3 for other accesses.
1349 */
1350 int el = arm_current_el(env);
1351
1352 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1353 return CP_ACCESS_TRAP;
1354 }
1355 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1356 && !arm_is_secure_below_el3(env)) {
1357 return CP_ACCESS_TRAP_EL2;
1358 }
1359 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1360 return CP_ACCESS_TRAP_EL3;
1361 }
1362
1363 return CP_ACCESS_OK;
1364 }
1365
1366 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1367 const ARMCPRegInfo *ri,
1368 bool isread)
1369 {
1370 /* ER: event counter read trap control */
1371 if (arm_feature(env, ARM_FEATURE_V8)
1372 && arm_current_el(env) == 0
1373 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1374 && isread) {
1375 return CP_ACCESS_OK;
1376 }
1377
1378 return pmreg_access(env, ri, isread);
1379 }
1380
1381 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1382 const ARMCPRegInfo *ri,
1383 bool isread)
1384 {
1385 /* SW: software increment write trap control */
1386 if (arm_feature(env, ARM_FEATURE_V8)
1387 && arm_current_el(env) == 0
1388 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1389 && !isread) {
1390 return CP_ACCESS_OK;
1391 }
1392
1393 return pmreg_access(env, ri, isread);
1394 }
1395
1396 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1397 const ARMCPRegInfo *ri,
1398 bool isread)
1399 {
1400 /* ER: event counter read trap control */
1401 if (arm_feature(env, ARM_FEATURE_V8)
1402 && arm_current_el(env) == 0
1403 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1404 return CP_ACCESS_OK;
1405 }
1406
1407 return pmreg_access(env, ri, isread);
1408 }
1409
1410 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1411 const ARMCPRegInfo *ri,
1412 bool isread)
1413 {
1414 /* CR: cycle counter read trap control */
1415 if (arm_feature(env, ARM_FEATURE_V8)
1416 && arm_current_el(env) == 0
1417 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1418 && isread) {
1419 return CP_ACCESS_OK;
1420 }
1421
1422 return pmreg_access(env, ri, isread);
1423 }
1424
1425 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1426 * the current EL, security state, and register configuration.
1427 */
1428 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1429 {
1430 uint64_t filter;
1431 bool e, p, u, nsk, nsu, nsh, m;
1432 bool enabled, prohibited, filtered;
1433 bool secure = arm_is_secure(env);
1434 int el = arm_current_el(env);
1435 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1436
1437 if (!arm_feature(env, ARM_FEATURE_PMU)) {
1438 return false;
1439 }
1440
1441 if (!arm_feature(env, ARM_FEATURE_EL2) ||
1442 (counter < hpmn || counter == 31)) {
1443 e = env->cp15.c9_pmcr & PMCRE;
1444 } else {
1445 e = env->cp15.mdcr_el2 & MDCR_HPME;
1446 }
1447 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1448
1449 if (!secure) {
1450 if (el == 2 && (counter < hpmn || counter == 31)) {
1451 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1452 } else {
1453 prohibited = false;
1454 }
1455 } else {
1456 prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1457 !(env->cp15.mdcr_el3 & MDCR_SPME);
1458 }
1459
1460 if (prohibited && counter == 31) {
1461 prohibited = env->cp15.c9_pmcr & PMCRDP;
1462 }
1463
1464 if (counter == 31) {
1465 filter = env->cp15.pmccfiltr_el0;
1466 } else {
1467 filter = env->cp15.c14_pmevtyper[counter];
1468 }
1469
1470 p = filter & PMXEVTYPER_P;
1471 u = filter & PMXEVTYPER_U;
1472 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1473 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1474 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1475 m = arm_el_is_aa64(env, 1) &&
1476 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1477
1478 if (el == 0) {
1479 filtered = secure ? u : u != nsu;
1480 } else if (el == 1) {
1481 filtered = secure ? p : p != nsk;
1482 } else if (el == 2) {
1483 filtered = !nsh;
1484 } else { /* EL3 */
1485 filtered = m != p;
1486 }
1487
1488 if (counter != 31) {
1489 /*
1490 * If not checking PMCCNTR, ensure the counter is setup to an event we
1491 * support
1492 */
1493 uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1494 if (!event_supported(event)) {
1495 return false;
1496 }
1497 }
1498
1499 return enabled && !prohibited && !filtered;
1500 }
1501
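/* Recompute the PMU interrupt line: it is asserted when the PMU is
 * enabled (PMCR.E) and an overflow flag is set in PMOVSR for a counter
 * whose interrupt is enabled in PMINTEN.
 */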
1502 static void pmu_update_irq(CPUARMState *env)
1503 {
1504 ARMCPU *cpu = env_archcpu(env);
1505 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1506 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1507 }
1508
1509 /*
1510 * Ensure c15_ccnt is the guest-visible count so that operations such as
1511 * enabling/disabling the counter or filtering, modifying the count itself,
1512 * etc. can be done logically. This is essentially a no-op if the counter is
1513 * not enabled at the time of the call.
1514 */
1515 static void pmccntr_op_start(CPUARMState *env)
1516 {
1517 uint64_t cycles = cycles_get_count(env);
1518
1519 if (pmu_counter_enabled(env, 31)) {
1520 uint64_t eff_cycles = cycles;
1521 if (env->cp15.c9_pmcr & PMCRD) {
1522 /* Increment once every 64 processor clock cycles */
1523 eff_cycles /= 64;
1524 }
1525
1526 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1527
1528 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
1529 1ull << 63 : 1ull << 31;
1530 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1531 env->cp15.c9_pmovsr |= (1 << 31);
1532 pmu_update_irq(env);
1533 }
1534
1535 env->cp15.c15_ccnt = new_pmccntr;
1536 }
1537 env->cp15.c15_ccnt_delta = cycles;
1538 }
1539
1540 /*
1541 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1542 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1543 * pmccntr_op_start.
1544 */
1545 static void pmccntr_op_finish(CPUARMState *env)
1546 {
1547 if (pmu_counter_enabled(env, 31)) {
1548 #ifndef CONFIG_USER_ONLY
1549 /* Calculate when the counter will next overflow */
1550 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1551 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1552 remaining_cycles = (uint32_t)remaining_cycles;
1553 }
1554 int64_t overflow_in = cycles_ns_per(remaining_cycles);
1555
1556 if (overflow_in > 0) {
1557 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1558 overflow_in;
1559 ARMCPU *cpu = env_archcpu(env);
1560 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1561 }
1562 #endif
1563
1564 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1565 if (env->cp15.c9_pmcr & PMCRD) {
1566 /* Increment once every 64 processor clock cycles */
1567 prev_cycles /= 64;
1568 }
1569 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1570 }
1571 }
1572
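/* Event-counter analogue of pmccntr_op_start: bring c14_pmevcntr[counter]
 * up to date with the underlying event count before it is read or changed.
 */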
1573 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1574 {
1575
1576 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1577 uint64_t count = 0;
1578 if (event_supported(event)) {
1579 uint16_t event_idx = supported_event_map[event];
1580 count = pm_events[event_idx].get_count(env);
1581 }
1582
1583 if (pmu_counter_enabled(env, counter)) {
1584 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1585
1586 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1587 env->cp15.c9_pmovsr |= (1 << counter);
1588 pmu_update_irq(env);
1589 }
1590 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1591 }
1592 env->cp15.c14_pmevcntr_delta[counter] = count;
1593 }
1594
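/* Event-counter analogue of pmccntr_op_finish: recompute the stored delta
 * and (outside user-only mode) reschedule the PMU timer for the counter's
 * next overflow.
 */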
1595 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1596 {
1597 if (pmu_counter_enabled(env, counter)) {
1598 #ifndef CONFIG_USER_ONLY
1599 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1600 uint16_t event_idx = supported_event_map[event];
1601 uint64_t delta = UINT32_MAX -
1602 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1603 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1604
1605 if (overflow_in > 0) {
1606 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1607 overflow_in;
1608 ARMCPU *cpu = env_archcpu(env);
1609 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1610 }
1611 #endif
1612
1613 env->cp15.c14_pmevcntr_delta[counter] -=
1614 env->cp15.c14_pmevcntr[counter];
1615 }
1616 }
1617
1618 void pmu_op_start(CPUARMState *env)
1619 {
1620 unsigned int i;
1621 pmccntr_op_start(env);
1622 for (i = 0; i < pmu_num_counters(env); i++) {
1623 pmevcntr_op_start(env, i);
1624 }
1625 }
1626
1627 void pmu_op_finish(CPUARMState *env)
1628 {
1629 unsigned int i;
1630 pmccntr_op_finish(env);
1631 for (i = 0; i < pmu_num_counters(env); i++) {
1632 pmevcntr_op_finish(env, i);
1633 }
1634 }
1635
1636 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1637 {
1638 pmu_op_start(&cpu->env);
1639 }
1640
1641 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1642 {
1643 pmu_op_finish(&cpu->env);
1644 }
1645
1646 void arm_pmu_timer_cb(void *opaque)
1647 {
1648 ARMCPU *cpu = opaque;
1649
1650 /*
1651 * Update all the counter values based on the current underlying counts,
1652 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1653 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1654 * counter may expire.
1655 */
1656 pmu_op_start(&cpu->env);
1657 pmu_op_finish(&cpu->env);
1658 }
1659
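/* Handle writes to PMCR: the write-only C and P bits reset the cycle
 * counter and the event counters respectively; only the bits in
 * PMCR_WRITEABLE_MASK are retained in the register itself.
 */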
1660 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1661 uint64_t value)
1662 {
1663 pmu_op_start(env);
1664
1665 if (value & PMCRC) {
1666 /* The counter has been reset */
1667 env->cp15.c15_ccnt = 0;
1668 }
1669
1670 if (value & PMCRP) {
1671 unsigned int i;
1672 for (i = 0; i < pmu_num_counters(env); i++) {
1673 env->cp15.c14_pmevcntr[i] = 0;
1674 }
1675 }
1676
1677 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
1678 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
1679
1680 pmu_op_finish(env);
1681 }
1682
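/* PMSWINC: increment each selected counter that is enabled and whose
 * event type is SW_INCR (0x0), detecting overflow as we go.
 */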
1683 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1684 uint64_t value)
1685 {
1686 unsigned int i;
1687 for (i = 0; i < pmu_num_counters(env); i++) {
1688 /* Increment a counter's count iff: */
1689 if ((value & (1 << i)) && /* counter's bit is set */
1690 /* counter is enabled and not filtered */
1691 pmu_counter_enabled(env, i) &&
1692 /* counter is SW_INCR */
1693 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1694 pmevcntr_op_start(env, i);
1695
1696 /*
1697 * Detect if this write causes an overflow since we can't predict
1698 * PMSWINC overflows like we can for other events
1699 */
1700 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1701
1702 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1703 env->cp15.c9_pmovsr |= (1 << i);
1704 pmu_update_irq(env);
1705 }
1706
1707 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1708
1709 pmevcntr_op_finish(env, i);
1710 }
1711 }
1712 }
1713
1714 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1715 {
1716 uint64_t ret;
1717 pmccntr_op_start(env);
1718 ret = env->cp15.c15_ccnt;
1719 pmccntr_op_finish(env);
1720 return ret;
1721 }
1722
1723 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1724 uint64_t value)
1725 {
1726 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1727 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1728 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1729 * accessed.
1730 */
1731 env->cp15.c9_pmselr = value & 0x1f;
1732 }
1733
1734 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1735 uint64_t value)
1736 {
1737 pmccntr_op_start(env);
1738 env->cp15.c15_ccnt = value;
1739 pmccntr_op_finish(env);
1740 }
1741
1742 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1743 uint64_t value)
1744 {
1745 uint64_t cur_val = pmccntr_read(env, NULL);
1746
1747 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1748 }
1749
1750 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751 uint64_t value)
1752 {
1753 pmccntr_op_start(env);
1754 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1755 pmccntr_op_finish(env);
1756 }
1757
1758 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1759 uint64_t value)
1760 {
1761 pmccntr_op_start(env);
1762 /* M is not accessible from AArch32 */
1763 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1764 (value & PMCCFILTR);
1765 pmccntr_op_finish(env);
1766 }
1767
1768 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1769 {
1770 /* M is not visible in AArch32 */
1771 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1772 }
1773
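/* PMCNTENSET/CLR and PMOVSCLR/PMOVSSET below only affect bits for
 * counters that are actually implemented; see pmu_counter_mask().
 */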
1774 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1775 uint64_t value)
1776 {
1777 value &= pmu_counter_mask(env);
1778 env->cp15.c9_pmcnten |= value;
1779 }
1780
1781 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1782 uint64_t value)
1783 {
1784 value &= pmu_counter_mask(env);
1785 env->cp15.c9_pmcnten &= ~value;
1786 }
1787
1788 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1789 uint64_t value)
1790 {
1791 value &= pmu_counter_mask(env);
1792 env->cp15.c9_pmovsr &= ~value;
1793 pmu_update_irq(env);
1794 }
1795
1796 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1797 uint64_t value)
1798 {
1799 value &= pmu_counter_mask(env);
1800 env->cp15.c9_pmovsr |= value;
1801 pmu_update_irq(env);
1802 }
1803
1804 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1805 uint64_t value, const uint8_t counter)
1806 {
1807 if (counter == 31) {
1808 pmccfiltr_write(env, ri, value);
1809 } else if (counter < pmu_num_counters(env)) {
1810 pmevcntr_op_start(env, counter);
1811
1812 /*
1813 * If this counter's event type is changing, store the current
1814 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1815 * pmevcntr_op_finish has the correct baseline when it converts back to
1816 * a delta.
1817 */
1818 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1819 PMXEVTYPER_EVTCOUNT;
1820 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1821 if (old_event != new_event) {
1822 uint64_t count = 0;
1823 if (event_supported(new_event)) {
1824 uint16_t event_idx = supported_event_map[new_event];
1825 count = pm_events[event_idx].get_count(env);
1826 }
1827 env->cp15.c14_pmevcntr_delta[counter] = count;
1828 }
1829
1830 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1831 pmevcntr_op_finish(env, counter);
1832 }
1833 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1834      * the PMSELR value is equal to or greater than the number of implemented
1835 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1836 */
1837 }
1838
1839 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1840 const uint8_t counter)
1841 {
1842 if (counter == 31) {
1843 return env->cp15.pmccfiltr_el0;
1844 } else if (counter < pmu_num_counters(env)) {
1845 return env->cp15.c14_pmevtyper[counter];
1846 } else {
1847 /*
1848 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1849 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1850 */
1851 return 0;
1852 }
1853 }
1854
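/*
 * For the PMEVTYPER<n>/PMEVCNTR<n> register banks the counter number n is
 * encoded in the low two bits of CRm and the three opc2 bits, i.e.
 * n = ((crm & 3) << 3) | (opc2 & 7).
 */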
1855 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1856 uint64_t value)
1857 {
1858 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1859 pmevtyper_write(env, ri, value, counter);
1860 }
1861
1862 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1863 uint64_t value)
1864 {
1865 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1866 env->cp15.c14_pmevtyper[counter] = value;
1867
1868 /*
1869 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1870 * pmu_op_finish calls when loading saved state for a migration. Because
1871 * we're potentially updating the type of event here, the value written to
1872      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1873 * different counter type. Therefore, we need to set this value to the
1874 * current count for the counter type we're writing so that pmu_op_finish
1875 * has the correct count for its calculation.
1876 */
1877 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1878 if (event_supported(event)) {
1879 uint16_t event_idx = supported_event_map[event];
1880 env->cp15.c14_pmevcntr_delta[counter] =
1881 pm_events[event_idx].get_count(env);
1882 }
1883 }
1884
1885 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1886 {
1887 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1888 return pmevtyper_read(env, ri, counter);
1889 }
1890
1891 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1892 uint64_t value)
1893 {
1894 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1895 }
1896
1897 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1898 {
1899 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1900 }
1901
1902 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1903 uint64_t value, uint8_t counter)
1904 {
1905 if (counter < pmu_num_counters(env)) {
1906 pmevcntr_op_start(env, counter);
1907 env->cp15.c14_pmevcntr[counter] = value;
1908 pmevcntr_op_finish(env, counter);
1909 }
1910 /*
1911 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1912 * are CONSTRAINED UNPREDICTABLE.
1913 */
1914 }
1915
1916 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1917 uint8_t counter)
1918 {
1919 if (counter < pmu_num_counters(env)) {
1920 uint64_t ret;
1921 pmevcntr_op_start(env, counter);
1922 ret = env->cp15.c14_pmevcntr[counter];
1923 pmevcntr_op_finish(env, counter);
1924 return ret;
1925 } else {
1926 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1927 * are CONSTRAINED UNPREDICTABLE. */
1928 return 0;
1929 }
1930 }
1931
1932 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1933 uint64_t value)
1934 {
1935 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1936 pmevcntr_write(env, ri, value, counter);
1937 }
1938
1939 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1940 {
1941 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1942 return pmevcntr_read(env, ri, counter);
1943 }
1944
1945 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1946 uint64_t value)
1947 {
1948 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1949 assert(counter < pmu_num_counters(env));
1950 env->cp15.c14_pmevcntr[counter] = value;
1951 pmevcntr_write(env, ri, value, counter);
1952 }
1953
1954 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1955 {
1956 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1957 assert(counter < pmu_num_counters(env));
1958 return env->cp15.c14_pmevcntr[counter];
1959 }
1960
1961 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1962 uint64_t value)
1963 {
1964 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1965 }
1966
1967 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1968 {
1969 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1970 }
1971
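/*
 * PMUSERENR defines four enable bits (EN, SW, CR, ER) from v8 onwards;
 * pre-v8 implementations have only the EN bit.
 */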
1972 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1973 uint64_t value)
1974 {
1975 if (arm_feature(env, ARM_FEATURE_V8)) {
1976 env->cp15.c9_pmuserenr = value & 0xf;
1977 } else {
1978 env->cp15.c9_pmuserenr = value & 1;
1979 }
1980 }
1981
1982 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1983 uint64_t value)
1984 {
1985     /* Only bits for implemented counters (and the cycle counter C bit) can be set */
1986 value &= pmu_counter_mask(env);
1987 env->cp15.c9_pminten |= value;
1988 pmu_update_irq(env);
1989 }
1990
1991 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1992 uint64_t value)
1993 {
1994 value &= pmu_counter_mask(env);
1995 env->cp15.c9_pminten &= ~value;
1996 pmu_update_irq(env);
1997 }
1998
1999 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2000 uint64_t value)
2001 {
2002 /* Note that even though the AArch64 view of this register has bits
2003 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
2004 * architectural requirements for bits which are RES0 only in some
2005 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
2006 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
2007 */
2008 raw_write(env, ri, value & ~0x1FULL);
2009 }
2010
2011 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2012 {
2013 /* Begin with base v8.0 state. */
2014 uint32_t valid_mask = 0x3fff;
2015 ARMCPU *cpu = env_archcpu(env);
2016
2017 if (ri->state == ARM_CP_STATE_AA64) {
2018 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
2019 valid_mask &= ~SCR_NET;
2020
2021 if (cpu_isar_feature(aa64_lor, cpu)) {
2022 valid_mask |= SCR_TLOR;
2023 }
2024 if (cpu_isar_feature(aa64_pauth, cpu)) {
2025 valid_mask |= SCR_API | SCR_APK;
2026 }
2027 if (cpu_isar_feature(aa64_mte, cpu)) {
2028 valid_mask |= SCR_ATA;
2029 }
2030 } else {
2031 valid_mask &= ~(SCR_RW | SCR_ST);
2032 }
2033
2034 if (!arm_feature(env, ARM_FEATURE_EL2)) {
2035 valid_mask &= ~SCR_HCE;
2036
2037 /* On ARMv7, SMD (or SCD as it is called in v7) is only
2038 * supported if EL2 exists. The bit is UNK/SBZP when
2039 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
2040 * when EL2 is unavailable.
2041 * On ARMv8, this bit is always available.
2042 */
2043 if (arm_feature(env, ARM_FEATURE_V7) &&
2044 !arm_feature(env, ARM_FEATURE_V8)) {
2045 valid_mask &= ~SCR_SMD;
2046 }
2047 }
2048
2049 /* Clear all-context RES0 bits. */
2050 value &= valid_mask;
2051 raw_write(env, ri, value);
2052 }
2053
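/*
 * HCR_EL2.TID2 traps EL1 accesses to the cache ID registers
 * (CCSIDR, CLIDR, CSSELR, CTR) to EL2.
 */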
2054 static CPAccessResult access_aa64_tid2(CPUARMState *env,
2055 const ARMCPRegInfo *ri,
2056 bool isread)
2057 {
2058 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
2059 return CP_ACCESS_TRAP_EL2;
2060 }
2061
2062 return CP_ACCESS_OK;
2063 }
2064
2065 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2066 {
2067 ARMCPU *cpu = env_archcpu(env);
2068
2069     /* Use the CSSELR value from the register bank for the current
2070      * security state to index into the CCSIDR array.
2071      */
2072 uint32_t index = A32_BANKED_REG_GET(env, csselr,
2073 ri->secure & ARM_CP_SECSTATE_S);
2074
2075 return cpu->ccsidr[index];
2076 }
2077
2078 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2079 uint64_t value)
2080 {
2081 raw_write(env, ri, value & 0xf);
2082 }
2083
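/*
 * ISR_EL1 (AArch32 ISR) reports which interrupt inputs are pending at the
 * current exception level. At EL1, when HCR_EL2.IMO/FMO route IRQ/FIQ to
 * EL2, the virtual interrupt lines are reported instead of the physical
 * ones.
 */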
2084 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2085 {
2086 CPUState *cs = env_cpu(env);
2087 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
2088 uint64_t ret = 0;
2089 bool allow_virt = (arm_current_el(env) == 1 &&
2090 (!arm_is_secure_below_el3(env) ||
2091 (env->cp15.scr_el3 & SCR_EEL2)));
2092
2093 if (allow_virt && (hcr_el2 & HCR_IMO)) {
2094 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2095 ret |= CPSR_I;
2096 }
2097 } else {
2098 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2099 ret |= CPSR_I;
2100 }
2101 }
2102
2103 if (allow_virt && (hcr_el2 & HCR_FMO)) {
2104 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2105 ret |= CPSR_F;
2106 }
2107 } else {
2108 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2109 ret |= CPSR_F;
2110 }
2111 }
2112
2113 /* External aborts are not possible in QEMU so A bit is always clear */
2114 return ret;
2115 }
2116
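/*
 * HCR_EL2.TID1 traps EL1 accesses to ID group 1 registers
 * (such as AIDR and REVIDR) to EL2.
 */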
2117 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2118 bool isread)
2119 {
2120 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2121 return CP_ACCESS_TRAP_EL2;
2122 }
2123
2124 return CP_ACCESS_OK;
2125 }
2126
2127 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2128 bool isread)
2129 {
2130 if (arm_feature(env, ARM_FEATURE_V8)) {
2131 return access_aa64_tid1(env, ri, isread);
2132 }
2133
2134 return CP_ACCESS_OK;
2135 }
2136
2137 static const ARMCPRegInfo v7_cp_reginfo[] = {
2138 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2139 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2140 .access = PL1_W, .type = ARM_CP_NOP },
2141 /* Performance monitors are implementation defined in v7,
2142 * but with an ARM recommended set of registers, which we
2143 * follow.
2144 *
2145 * Performance registers fall into three categories:
2146 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2147 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2148 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2149 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2150 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2151 */
2152 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2153 .access = PL0_RW, .type = ARM_CP_ALIAS,
2154 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2155 .writefn = pmcntenset_write,
2156 .accessfn = pmreg_access,
2157 .raw_writefn = raw_write },
2158 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2159 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2160 .access = PL0_RW, .accessfn = pmreg_access,
2161 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2162 .writefn = pmcntenset_write, .raw_writefn = raw_write },
2163 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2164 .access = PL0_RW,
2165 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2166 .accessfn = pmreg_access,
2167 .writefn = pmcntenclr_write,
2168 .type = ARM_CP_ALIAS },
2169 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2170 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2171 .access = PL0_RW, .accessfn = pmreg_access,
2172 .type = ARM_CP_ALIAS,
2173 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2174 .writefn = pmcntenclr_write },
2175 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2176 .access = PL0_RW, .type = ARM_CP_IO,
2177 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2178 .accessfn = pmreg_access,
2179 .writefn = pmovsr_write,
2180 .raw_writefn = raw_write },
2181 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2182 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2183 .access = PL0_RW, .accessfn = pmreg_access,
2184 .type = ARM_CP_ALIAS | ARM_CP_IO,
2185 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2186 .writefn = pmovsr_write,
2187 .raw_writefn = raw_write },
2188 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2189 .access = PL0_W, .accessfn = pmreg_access_swinc,
2190 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2191 .writefn = pmswinc_write },
2192 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2193 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2194 .access = PL0_W, .accessfn = pmreg_access_swinc,
2195 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2196 .writefn = pmswinc_write },
2197 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2198 .access = PL0_RW, .type = ARM_CP_ALIAS,
2199 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2200 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2201 .raw_writefn = raw_write},
2202 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2203 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2204 .access = PL0_RW, .accessfn = pmreg_access_selr,
2205 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2206 .writefn = pmselr_write, .raw_writefn = raw_write, },
2207 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2208 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2209 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2210 .accessfn = pmreg_access_ccntr },
2211 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2212 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2213 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2214 .type = ARM_CP_IO,
2215 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2216 .readfn = pmccntr_read, .writefn = pmccntr_write,
2217 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2218 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2219 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2220 .access = PL0_RW, .accessfn = pmreg_access,
2221 .type = ARM_CP_ALIAS | ARM_CP_IO,
2222 .resetvalue = 0, },
2223 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2224 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2225 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2226 .access = PL0_RW, .accessfn = pmreg_access,
2227 .type = ARM_CP_IO,
2228 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2229 .resetvalue = 0, },
2230 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2231 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2232 .accessfn = pmreg_access,
2233 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2234 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2235 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2236 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2237 .accessfn = pmreg_access,
2238 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2239 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2240 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2241 .accessfn = pmreg_access_xevcntr,
2242 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2243 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2244 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2245 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2246 .accessfn = pmreg_access_xevcntr,
2247 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2248 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2249 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2250 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2251 .resetvalue = 0,
2252 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2253 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2254 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2255 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2256 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2257 .resetvalue = 0,
2258 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2259 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2260 .access = PL1_RW, .accessfn = access_tpm,
2261 .type = ARM_CP_ALIAS | ARM_CP_IO,
2262 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2263 .resetvalue = 0,
2264 .writefn = pmintenset_write, .raw_writefn = raw_write },
2265 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2266 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2267 .access = PL1_RW, .accessfn = access_tpm,
2268 .type = ARM_CP_IO,
2269 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2270 .writefn = pmintenset_write, .raw_writefn = raw_write,
2271 .resetvalue = 0x0 },
2272 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2273 .access = PL1_RW, .accessfn = access_tpm,
2274 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2275 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2276 .writefn = pmintenclr_write, },
2277 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2278 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2279 .access = PL1_RW, .accessfn = access_tpm,
2280 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2281 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2282 .writefn = pmintenclr_write },
2283 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2284 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2285 .access = PL1_R,
2286 .accessfn = access_aa64_tid2,
2287 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2288 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2289 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2290 .access = PL1_RW,
2291 .accessfn = access_aa64_tid2,
2292 .writefn = csselr_write, .resetvalue = 0,
2293 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2294 offsetof(CPUARMState, cp15.csselr_ns) } },
2295 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2296 * just RAZ for all cores:
2297 */
2298 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2299 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2300 .access = PL1_R, .type = ARM_CP_CONST,
2301 .accessfn = access_aa64_tid1,
2302 .resetvalue = 0 },
2303 /* Auxiliary fault status registers: these also are IMPDEF, and we
2304 * choose to RAZ/WI for all cores.
2305 */
2306 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2307 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2308 .access = PL1_RW, .accessfn = access_tvm_trvm,
2309 .type = ARM_CP_CONST, .resetvalue = 0 },
2310 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2311 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2312 .access = PL1_RW, .accessfn = access_tvm_trvm,
2313 .type = ARM_CP_CONST, .resetvalue = 0 },
2314 /* MAIR can just read-as-written because we don't implement caches
2315 * and so don't need to care about memory attributes.
2316 */
2317 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2318 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2319 .access = PL1_RW, .accessfn = access_tvm_trvm,
2320 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2321 .resetvalue = 0 },
2322 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2323 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2324 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2325 .resetvalue = 0 },
2326 /* For non-long-descriptor page tables these are PRRR and NMRR;
2327 * regardless they still act as reads-as-written for QEMU.
2328 */
2329     /* MAIR0/1 are defined separately from their 64-bit counterpart so that
2330      * each can be assigned the correct fieldoffset; the endianness is
2331      * handled in the field definitions.
2332      */
2333 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2334 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2335 .access = PL1_RW, .accessfn = access_tvm_trvm,
2336 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2337 offsetof(CPUARMState, cp15.mair0_ns) },
2338 .resetfn = arm_cp_reset_ignore },
2339 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2340 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2341 .access = PL1_RW, .accessfn = access_tvm_trvm,
2342 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2343 offsetof(CPUARMState, cp15.mair1_ns) },
2344 .resetfn = arm_cp_reset_ignore },
2345 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2346 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2347 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2348 /* 32 bit ITLB invalidates */
2349 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2350 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2351 .writefn = tlbiall_write },
2352 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2353 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2354 .writefn = tlbimva_write },
2355 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2356 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2357 .writefn = tlbiasid_write },
2358 /* 32 bit DTLB invalidates */
2359 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2360 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2361 .writefn = tlbiall_write },
2362 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2363 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2364 .writefn = tlbimva_write },
2365 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2366 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2367 .writefn = tlbiasid_write },
2368 /* 32 bit TLB invalidates */
2369 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2370 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2371 .writefn = tlbiall_write },
2372 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2373 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2374 .writefn = tlbimva_write },
2375 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2376 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2377 .writefn = tlbiasid_write },
2378 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2379 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2380 .writefn = tlbimvaa_write },
2381 REGINFO_SENTINEL
2382 };
2383
2384 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2385 /* 32 bit TLB invalidates, Inner Shareable */
2386 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2387 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2388 .writefn = tlbiall_is_write },
2389 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2390 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2391 .writefn = tlbimva_is_write },
2392 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2393 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2394 .writefn = tlbiasid_is_write },
2395 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2396 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2397 .writefn = tlbimvaa_is_write },
2398 REGINFO_SENTINEL
2399 };
2400
2401 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2402 /* PMOVSSET is not implemented in v7 before v7ve */
2403 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2404 .access = PL0_RW, .accessfn = pmreg_access,
2405 .type = ARM_CP_ALIAS | ARM_CP_IO,
2406 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2407 .writefn = pmovsset_write,
2408 .raw_writefn = raw_write },
2409 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2410 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2411 .access = PL0_RW, .accessfn = pmreg_access,
2412 .type = ARM_CP_ALIAS | ARM_CP_IO,
2413 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2414 .writefn = pmovsset_write,
2415 .raw_writefn = raw_write },
2416 REGINFO_SENTINEL
2417 };
2418
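/*
 * ThumbEE registers: TEECR bit 0 (XED) controls whether unprivileged
 * (EL0) accesses to TEEHBR are permitted; see teehbr_access below.
 */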
2419 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2420 uint64_t value)
2421 {
2422 value &= 1;
2423 env->teecr = value;
2424 }
2425
2426 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2427 bool isread)
2428 {
2429 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2430 return CP_ACCESS_TRAP;
2431 }
2432 return CP_ACCESS_OK;
2433 }
2434
2435 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2436 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2437 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2438 .resetvalue = 0,
2439 .writefn = teecr_write },
2440 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2441 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2442 .accessfn = teehbr_access, .resetvalue = 0 },
2443 REGINFO_SENTINEL
2444 };
2445
2446 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2447 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2448 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2449 .access = PL0_RW,
2450 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2451 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2452 .access = PL0_RW,
2453 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2454 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2455 .resetfn = arm_cp_reset_ignore },
2456 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2457 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2458     .access = PL0_R | PL1_W,
2459 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2460 .resetvalue = 0},
2461 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2462     .access = PL0_R | PL1_W,
2463 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2464 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2465 .resetfn = arm_cp_reset_ignore },
2466 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2467 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2468 .access = PL1_RW,
2469 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2470 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2471 .access = PL1_RW,
2472 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2473 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2474 .resetvalue = 0 },
2475 REGINFO_SENTINEL
2476 };
2477
2478 #ifndef CONFIG_USER_ONLY
2479
2480 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2481 bool isread)
2482 {
2483 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2484 * Writable only at the highest implemented exception level.
2485 */
2486 int el = arm_current_el(env);
2487 uint64_t hcr;
2488 uint32_t cntkctl;
2489
2490 switch (el) {
2491 case 0:
2492 hcr = arm_hcr_el2_eff(env);
2493 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2494 cntkctl = env->cp15.cnthctl_el2;
2495 } else {
2496 cntkctl = env->cp15.c14_cntkctl;
2497 }
2498 if (!extract32(cntkctl, 0, 2)) {
2499 return CP_ACCESS_TRAP;
2500 }
2501 break;
2502 case 1:
2503 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2504 arm_is_secure_below_el3(env)) {
2505 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2506 return CP_ACCESS_TRAP_UNCATEGORIZED;
2507 }
2508 break;
2509 case 2:
2510 case 3:
2511 break;
2512 }
2513
2514 if (!isread && el < arm_highest_el(env)) {
2515 return CP_ACCESS_TRAP_UNCATEGORIZED;
2516 }
2517
2518 return CP_ACCESS_OK;
2519 }
2520
2521 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2522 bool isread)
2523 {
2524 unsigned int cur_el = arm_current_el(env);
2525 bool secure = arm_is_secure(env);
2526 uint64_t hcr = arm_hcr_el2_eff(env);
2527
2528 switch (cur_el) {
2529 case 0:
2530 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2531 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2532 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2533 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2534 }
2535
2536 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2537 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2538 return CP_ACCESS_TRAP;
2539 }
2540
2541 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2542 if (hcr & HCR_E2H) {
2543 if (timeridx == GTIMER_PHYS &&
2544 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2545 return CP_ACCESS_TRAP_EL2;
2546 }
2547 } else {
2548 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2549 if (arm_feature(env, ARM_FEATURE_EL2) &&
2550 timeridx == GTIMER_PHYS && !secure &&
2551 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2552 return CP_ACCESS_TRAP_EL2;
2553 }
2554 }
2555 break;
2556
2557 case 1:
2558 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2559 if (arm_feature(env, ARM_FEATURE_EL2) &&
2560 timeridx == GTIMER_PHYS && !secure &&
2561 (hcr & HCR_E2H
2562 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2563 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2564 return CP_ACCESS_TRAP_EL2;
2565 }
2566 break;
2567 }
2568 return CP_ACCESS_OK;
2569 }
2570
2571 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2572 bool isread)
2573 {
2574 unsigned int cur_el = arm_current_el(env);
2575 bool secure = arm_is_secure(env);
2576 uint64_t hcr = arm_hcr_el2_eff(env);
2577
2578 switch (cur_el) {
2579 case 0:
2580 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2581 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2582 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2583 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2584 }
2585
2586 /*
2587 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2588 * EL0 if EL0[PV]TEN is zero.
2589 */
2590 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2591 return CP_ACCESS_TRAP;
2592 }
2593 /* fall through */
2594
2595 case 1:
2596 if (arm_feature(env, ARM_FEATURE_EL2) &&
2597 timeridx == GTIMER_PHYS && !secure) {
2598 if (hcr & HCR_E2H) {
2599 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2600 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2601 return CP_ACCESS_TRAP_EL2;
2602 }
2603 } else {
2604 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2605 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2606 return CP_ACCESS_TRAP_EL2;
2607 }
2608 }
2609 }
2610 break;
2611 }
2612 return CP_ACCESS_OK;
2613 }
2614
2615 static CPAccessResult gt_pct_access(CPUARMState *env,
2616 const ARMCPRegInfo *ri,
2617 bool isread)
2618 {
2619 return gt_counter_access(env, GTIMER_PHYS, isread);
2620 }
2621
2622 static CPAccessResult gt_vct_access(CPUARMState *env,
2623 const ARMCPRegInfo *ri,
2624 bool isread)
2625 {
2626 return gt_counter_access(env, GTIMER_VIRT, isread);
2627 }
2628
2629 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2630 bool isread)
2631 {
2632 return gt_timer_access(env, GTIMER_PHYS, isread);
2633 }
2634
2635 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2636 bool isread)
2637 {
2638 return gt_timer_access(env, GTIMER_VIRT, isread);
2639 }
2640
2641 static CPAccessResult gt_stimer_access(CPUARMState *env,
2642 const ARMCPRegInfo *ri,
2643 bool isread)
2644 {
2645 /* The AArch64 register view of the secure physical timer is
2646 * always accessible from EL3, and configurably accessible from
2647 * Secure EL1.
2648 */
2649 switch (arm_current_el(env)) {
2650 case 1:
2651 if (!arm_is_secure(env)) {
2652 return CP_ACCESS_TRAP;
2653 }
2654 if (!(env->cp15.scr_el3 & SCR_ST)) {
2655 return CP_ACCESS_TRAP_EL3;
2656 }
2657 return CP_ACCESS_OK;
2658 case 0:
2659 case 2:
2660 return CP_ACCESS_TRAP;
2661 case 3:
2662 return CP_ACCESS_OK;
2663 default:
2664 g_assert_not_reached();
2665 }
2666 }
2667
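/*
 * Convert the QEMU virtual clock (nanoseconds) into generic timer ticks
 * at the emulated counter frequency.
 */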
2668 static uint64_t gt_get_countervalue(CPUARMState *env)
2669 {
2670 ARMCPU *cpu = env_archcpu(env);
2671
2672 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2673 }
2674
2675 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2676 {
2677 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2678
2679 if (gt->ctl & 1) {
2680 /* Timer enabled: calculate and set current ISTATUS, irq, and
2681 * reset timer to when ISTATUS next has to change
2682 */
2683 uint64_t offset = timeridx == GTIMER_VIRT ?
2684 cpu->env.cp15.cntvoff_el2 : 0;
2685 uint64_t count = gt_get_countervalue(&cpu->env);
2686 /* Note that this must be unsigned 64 bit arithmetic: */
2687 int istatus = count - offset >= gt->cval;
2688 uint64_t nexttick;
2689 int irqstate;
2690
2691 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2692
2693 irqstate = (istatus && !(gt->ctl & 2));
2694 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2695
2696 if (istatus) {
2697 /* Next transition is when count rolls back over to zero */
2698 nexttick = UINT64_MAX;
2699 } else {
2700 /* Next transition is when we hit cval */
2701 nexttick = gt->cval + offset;
2702 }
2703 /* Note that the desired next expiry time might be beyond the
2704 * signed-64-bit range of a QEMUTimer -- in this case we just
2705 * set the timer for as far in the future as possible. When the
2706 * timer expires we will reset the timer for any remaining period.
2707 */
2708 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2709 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2710 } else {
2711 timer_mod(cpu->gt_timer[timeridx], nexttick);
2712 }
2713 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2714 } else {
2715 /* Timer disabled: ISTATUS and timer output always clear */
2716 gt->ctl &= ~4;
2717 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2718 timer_del(cpu->gt_timer[timeridx]);
2719 trace_arm_gt_recalc_disabled(timeridx);
2720 }
2721 }
2722
2723 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2724 int timeridx)
2725 {
2726 ARMCPU *cpu = env_archcpu(env);
2727
2728 timer_del(cpu->gt_timer[timeridx]);
2729 }
2730
2731 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2732 {
2733 return gt_get_countervalue(env);
2734 }
2735
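/*
 * Return the offset to subtract from the physical count to produce the
 * virtual count: zero at EL2 with HCR_EL2.E2H set and at EL0 when
 * HCR_EL2.{E2H,TGE} == {1,1}, otherwise CNTVOFF_EL2.
 */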
2736 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2737 {
2738 uint64_t hcr;
2739
2740 switch (arm_current_el(env)) {
2741 case 2:
2742 hcr = arm_hcr_el2_eff(env);
2743 if (hcr & HCR_E2H) {
2744 return 0;
2745 }
2746 break;
2747 case 0:
2748 hcr = arm_hcr_el2_eff(env);
2749 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2750 return 0;
2751 }
2752 break;
2753 }
2754
2755 return env->cp15.cntvoff_el2;
2756 }
2757
2758 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2759 {
2760 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2761 }
2762
2763 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2764 int timeridx,
2765 uint64_t value)
2766 {
2767 trace_arm_gt_cval_write(timeridx, value);
2768 env->cp15.c14_timer[timeridx].cval = value;
2769 gt_recalc_timer(env_archcpu(env), timeridx);
2770 }
2771
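/*
 * TVAL is a signed 32-bit downcounter view of the timer:
 * TVAL = CVAL - (counter - offset), where the offset is nonzero only
 * for the virtual timers.
 */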
2772 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2773 int timeridx)
2774 {
2775 uint64_t offset = 0;
2776
2777 switch (timeridx) {
2778 case GTIMER_VIRT:
2779 case GTIMER_HYPVIRT:
2780 offset = gt_virt_cnt_offset(env);
2781 break;
2782 }
2783
2784 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2785 (gt_get_countervalue(env) - offset));
2786 }
2787
2788 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2789 int timeridx,
2790 uint64_t value)
2791 {
2792 uint64_t offset = 0;
2793
2794 switch (timeridx) {
2795 case GTIMER_VIRT:
2796 case GTIMER_HYPVIRT:
2797 offset = gt_virt_cnt_offset(env);
2798 break;
2799 }
2800
2801 trace_arm_gt_tval_write(timeridx, value);
2802 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2803 sextract64(value, 0, 32);
2804 gt_recalc_timer(env_archcpu(env), timeridx);
2805 }
2806
2807 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2808 int timeridx,
2809 uint64_t value)
2810 {
2811 ARMCPU *cpu = env_archcpu(env);
2812 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2813
2814 trace_arm_gt_ctl_write(timeridx, value);
2815 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2816 if ((oldval ^ value) & 1) {
2817 /* Enable toggled */
2818 gt_recalc_timer(cpu, timeridx);
2819 } else if ((oldval ^ value) & 2) {
2820 /* IMASK toggled: don't need to recalculate,
2821 * just set the interrupt line based on ISTATUS
2822 */
2823 int irqstate = (oldval & 4) && !(value & 2);
2824
2825 trace_arm_gt_imask_toggle(timeridx, irqstate);
2826 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2827 }
2828 }
2829
2830 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2831 {
2832 gt_timer_reset(env, ri, GTIMER_PHYS);
2833 }
2834
2835 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2836 uint64_t value)
2837 {
2838 gt_cval_write(env, ri, GTIMER_PHYS, value);
2839 }
2840
2841 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2842 {
2843 return gt_tval_read(env, ri, GTIMER_PHYS);
2844 }
2845
2846 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2847 uint64_t value)
2848 {
2849 gt_tval_write(env, ri, GTIMER_PHYS, value);
2850 }
2851
2852 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2853 uint64_t value)
2854 {
2855 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2856 }
2857
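/*
 * With HCR_EL2.E2H set (VHE), accesses to the EL0 physical and virtual
 * timer registers from the EL2&0 translation regimes are redirected to
 * the EL2 timers (CNTHP_* / CNTHV_*). These helpers pick the backing
 * timer index accordingly.
 */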
2858 static int gt_phys_redir_timeridx(CPUARMState *env)
2859 {
2860 switch (arm_mmu_idx(env)) {
2861 case ARMMMUIdx_E20_0:
2862 case ARMMMUIdx_E20_2:
2863 case ARMMMUIdx_E20_2_PAN:
2864 return GTIMER_HYP;
2865 default:
2866 return GTIMER_PHYS;
2867 }
2868 }
2869
2870 static int gt_virt_redir_timeridx(CPUARMState *env)
2871 {
2872 switch (arm_mmu_idx(env)) {
2873 case ARMMMUIdx_E20_0:
2874 case ARMMMUIdx_E20_2:
2875 case ARMMMUIdx_E20_2_PAN:
2876 return GTIMER_HYPVIRT;
2877 default:
2878 return GTIMER_VIRT;
2879 }
2880 }
2881
2882 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2883 const ARMCPRegInfo *ri)
2884 {
2885 int timeridx = gt_phys_redir_timeridx(env);
2886 return env->cp15.c14_timer[timeridx].cval;
2887 }
2888
2889 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2890 uint64_t value)
2891 {
2892 int timeridx = gt_phys_redir_timeridx(env);
2893 gt_cval_write(env, ri, timeridx, value);
2894 }
2895
2896 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2897 const ARMCPRegInfo *ri)
2898 {
2899 int timeridx = gt_phys_redir_timeridx(env);
2900 return gt_tval_read(env, ri, timeridx);
2901 }
2902
2903 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2904 uint64_t value)
2905 {
2906 int timeridx = gt_phys_redir_timeridx(env);
2907 gt_tval_write(env, ri, timeridx, value);
2908 }
2909
2910 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2911 const ARMCPRegInfo *ri)
2912 {
2913 int timeridx = gt_phys_redir_timeridx(env);
2914 return env->cp15.c14_timer[timeridx].ctl;
2915 }
2916
2917 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2918 uint64_t value)
2919 {
2920 int timeridx = gt_phys_redir_timeridx(env);
2921 gt_ctl_write(env, ri, timeridx, value);
2922 }
2923
2924 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2925 {
2926 gt_timer_reset(env, ri, GTIMER_VIRT);
2927 }
2928
2929 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2930 uint64_t value)
2931 {
2932 gt_cval_write(env, ri, GTIMER_VIRT, value);
2933 }
2934
2935 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2936 {
2937 return gt_tval_read(env, ri, GTIMER_VIRT);
2938 }
2939
2940 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2941 uint64_t value)
2942 {
2943 gt_tval_write(env, ri, GTIMER_VIRT, value);
2944 }
2945
2946 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2947 uint64_t value)
2948 {
2949 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2950 }
2951
2952 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2953 uint64_t value)
2954 {
2955 ARMCPU *cpu = env_archcpu(env);
2956
2957 trace_arm_gt_cntvoff_write(value);
2958 raw_write(env, ri, value);
2959 gt_recalc_timer(cpu, GTIMER_VIRT);
2960 }
2961
2962 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2963 const ARMCPRegInfo *ri)
2964 {
2965 int timeridx = gt_virt_redir_timeridx(env);
2966 return env->cp15.c14_timer[timeridx].cval;
2967 }
2968
2969 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2970 uint64_t value)
2971 {
2972 int timeridx = gt_virt_redir_timeridx(env);
2973 gt_cval_write(env, ri, timeridx, value);
2974 }
2975
2976 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2977 const ARMCPRegInfo *ri)
2978 {
2979 int timeridx = gt_virt_redir_timeridx(env);
2980 return gt_tval_read(env, ri, timeridx);
2981 }
2982
2983 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2984 uint64_t value)
2985 {
2986 int timeridx = gt_virt_redir_timeridx(env);
2987 gt_tval_write(env, ri, timeridx, value);
2988 }
2989
2990 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2991 const ARMCPRegInfo *ri)
2992 {
2993 int timeridx = gt_virt_redir_timeridx(env);
2994 return env->cp15.c14_timer[timeridx].ctl;
2995 }
2996
2997 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2998 uint64_t value)
2999 {
3000 int timeridx = gt_virt_redir_timeridx(env);
3001 gt_ctl_write(env, ri, timeridx, value);
3002 }
3003
3004 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3005 {
3006 gt_timer_reset(env, ri, GTIMER_HYP);
3007 }
3008
3009 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3010 uint64_t value)
3011 {
3012 gt_cval_write(env, ri, GTIMER_HYP, value);
3013 }
3014
3015 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3016 {
3017 return gt_tval_read(env, ri, GTIMER_HYP);
3018 }
3019
3020 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3021 uint64_t value)
3022 {
3023 gt_tval_write(env, ri, GTIMER_HYP, value);
3024 }
3025
3026 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3027 uint64_t value)
3028 {
3029 gt_ctl_write(env, ri, GTIMER_HYP, value);
3030 }
3031
3032 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3033 {
3034 gt_timer_reset(env, ri, GTIMER_SEC);
3035 }
3036
3037 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3038 uint64_t value)
3039 {
3040 gt_cval_write(env, ri, GTIMER_SEC, value);
3041 }
3042
3043 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3044 {
3045 return gt_tval_read(env, ri, GTIMER_SEC);
3046 }
3047
3048 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3049 uint64_t value)
3050 {
3051 gt_tval_write(env, ri, GTIMER_SEC, value);
3052 }
3053
3054 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3055 uint64_t value)
3056 {
3057 gt_ctl_write(env, ri, GTIMER_SEC, value);
3058 }
3059
3060 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3061 {
3062 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3063 }
3064
3065 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3066 uint64_t value)
3067 {
3068 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3069 }
3070
3071 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3072 {
3073 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3074 }
3075
3076 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3077 uint64_t value)
3078 {
3079 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3080 }
3081
3082 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3083 uint64_t value)
3084 {
3085 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3086 }
3087
3088 void arm_gt_ptimer_cb(void *opaque)
3089 {
3090 ARMCPU *cpu = opaque;
3091
3092 gt_recalc_timer(cpu, GTIMER_PHYS);
3093 }
3094
3095 void arm_gt_vtimer_cb(void *opaque)
3096 {
3097 ARMCPU *cpu = opaque;
3098
3099 gt_recalc_timer(cpu, GTIMER_VIRT);
3100 }
3101
3102 void arm_gt_htimer_cb(void *opaque)
3103 {
3104 ARMCPU *cpu = opaque;
3105
3106 gt_recalc_timer(cpu, GTIMER_HYP);
3107 }
3108
3109 void arm_gt_stimer_cb(void *opaque)
3110 {
3111 ARMCPU *cpu = opaque;
3112
3113 gt_recalc_timer(cpu, GTIMER_SEC);
3114 }
3115
3116 void arm_gt_hvtimer_cb(void *opaque)
3117 {
3118 ARMCPU *cpu = opaque;
3119
3120 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3121 }
3122
3123 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3124 {
3125 ARMCPU *cpu = env_archcpu(env);
3126
3127 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3128 }
3129
3130 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3131 /* Note that CNTFRQ is purely reads-as-written for the benefit
3132 * of software; writing it doesn't actually change the timer frequency.
3133 * Our reset value matches the fixed frequency we implement the timer at.
3134 */
3135 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3136 .type = ARM_CP_ALIAS,
3137 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3138 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3139 },