target/arm: fix stage 2 page-walks in 32-bit emulation
[qemu.git] / target/arm/helper.c
1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "target/arm/idau.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/gdbstub.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/host-utils.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "hw/semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/cpu-timers.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/tcg.h"
30 #include "qemu/range.h"
31 #include "qapi/qapi-commands-machine-target.h"
32 #include "qapi/error.h"
33 #include "qemu/guest-random.h"
34 #ifdef CONFIG_TCG
35 #include "arm_ldst.h"
36 #include "exec/cpu_ldst.h"
37 #endif
38
39 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
40
41 #ifndef CONFIG_USER_ONLY
42
43 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
44 MMUAccessType access_type, ARMMMUIdx mmu_idx,
45 bool s1_is_el0,
46 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
47 target_ulong *page_size_ptr,
48 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
49 __attribute__((nonnull));
50 #endif
51
52 static void switch_mode(CPUARMState *env, int mode);
53 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
54
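/*
 * gdb register layout implemented by the two accessors below: registers
 * 0..(nregs - 1) are the D registers (16 or 32 depending on aa32_simd_r32),
 * with Neon the next 16 are aliases for Q0..Q15, and the final three are
 * FPSID, FPSCR and FPEXC.  For example, on a core with 32 D registers and
 * Neon, reg 48 is FPSID and reg 50 is FPEXC.
 */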
55 static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
56 {
57 ARMCPU *cpu = env_archcpu(env);
58 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
59
60 /* VFP data registers are always little-endian. */
61 if (reg < nregs) {
62 return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
63 }
64 if (arm_feature(env, ARM_FEATURE_NEON)) {
65 /* Aliases for Q regs. */
66 nregs += 16;
67 if (reg < nregs) {
68 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
69 return gdb_get_reg128(buf, q[0], q[1]);
70 }
71 }
72 switch (reg - nregs) {
73 case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
74 case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env));
75 case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
76 }
77 return 0;
78 }
79
80 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
81 {
82 ARMCPU *cpu = env_archcpu(env);
83 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
84
85 if (reg < nregs) {
86 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
87 return 8;
88 }
89 if (arm_feature(env, ARM_FEATURE_NEON)) {
90 nregs += 16;
91 if (reg < nregs) {
92 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
93 q[0] = ldq_le_p(buf);
94 q[1] = ldq_le_p(buf + 8);
95 return 16;
96 }
97 }
98 switch (reg - nregs) {
99 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
100 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
101 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
102 }
103 return 0;
104 }
105
106 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
107 {
108 switch (reg) {
109 case 0 ... 31:
110 {
111 /* 128 bit FP register - quads are in LE order */
112 uint64_t *q = aa64_vfp_qreg(env, reg);
113 return gdb_get_reg128(buf, q[1], q[0]);
114 }
115 case 32:
116 /* FPSR */
117 return gdb_get_reg32(buf, vfp_get_fpsr(env));
118 case 33:
119 /* FPCR */
120 return gdb_get_reg32(buf, vfp_get_fpcr(env));
121 default:
122 return 0;
123 }
124 }
125
126 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
127 {
128 switch (reg) {
129 case 0 ... 31:
130 /* 128 bit FP register */
131 {
132 uint64_t *q = aa64_vfp_qreg(env, reg);
133 q[0] = ldq_le_p(buf);
134 q[1] = ldq_le_p(buf + 8);
135 return 16;
136 }
137 case 32:
138 /* FPSR */
139 vfp_set_fpsr(env, ldl_p(buf));
140 return 4;
141 case 33:
142 /* FPCR */
143 vfp_set_fpcr(env, ldl_p(buf));
144 return 4;
145 default:
146 return 0;
147 }
148 }
149
150 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
151 {
152 assert(ri->fieldoffset);
153 if (cpreg_field_is_64bit(ri)) {
154 return CPREG_FIELD64(env, ri);
155 } else {
156 return CPREG_FIELD32(env, ri);
157 }
158 }
159
160 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
161 uint64_t value)
162 {
163 assert(ri->fieldoffset);
164 if (cpreg_field_is_64bit(ri)) {
165 CPREG_FIELD64(env, ri) = value;
166 } else {
167 CPREG_FIELD32(env, ri) = value;
168 }
169 }
170
171 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
172 {
173 return (char *)env + ri->fieldoffset;
174 }
175
176 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
177 {
178 /* Raw read of a coprocessor register (as needed for migration, etc). */
179 if (ri->type & ARM_CP_CONST) {
180 return ri->resetvalue;
181 } else if (ri->raw_readfn) {
182 return ri->raw_readfn(env, ri);
183 } else if (ri->readfn) {
184 return ri->readfn(env, ri);
185 } else {
186 return raw_read(env, ri);
187 }
188 }
189
190 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
191 uint64_t v)
192 {
193 /* Raw write of a coprocessor register (as needed for migration, etc).
194 * Note that constant registers are treated as write-ignored; the
195 * caller should check for success by whether a readback gives the
196 * value written.
197 */
198 if (ri->type & ARM_CP_CONST) {
199 return;
200 } else if (ri->raw_writefn) {
201 ri->raw_writefn(env, ri, v);
202 } else if (ri->writefn) {
203 ri->writefn(env, ri, v);
204 } else {
205 raw_write(env, ri, v);
206 }
207 }
208
209 /**
210 * arm_get/set_gdb_*: get/set a gdb register
211 * @env: the CPU state
212 * @buf: a buffer to copy to/from
213 * @reg: register number (offset from start of group)
214 *
215 * We return the number of bytes copied
216 */
217
218 static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
219 {
220 ARMCPU *cpu = env_archcpu(env);
221 const ARMCPRegInfo *ri;
222 uint32_t key;
223
224 key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
225 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
226 if (ri) {
227 if (cpreg_field_is_64bit(ri)) {
228 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
229 } else {
230 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
231 }
232 }
233 return 0;
234 }
235
236 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
237 {
238 return 0;
239 }
240
241 #ifdef TARGET_AARCH64
242 static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
243 {
244 ARMCPU *cpu = env_archcpu(env);
245
246 switch (reg) {
247 /* The first 32 registers are the zregs */
248 case 0 ... 31:
249 {
250 int vq, len = 0;
251 for (vq = 0; vq < cpu->sve_max_vq; vq++) {
252 len += gdb_get_reg128(buf,
253 env->vfp.zregs[reg].d[vq * 2 + 1],
254 env->vfp.zregs[reg].d[vq * 2]);
255 }
256 return len;
257 }
258 case 32:
259 return gdb_get_reg32(buf, vfp_get_fpsr(env));
260 case 33:
261 return gdb_get_reg32(buf, vfp_get_fpcr(env));
262 /* then 16 predicates and the ffr */
263 case 34 ... 50:
264 {
265 int preg = reg - 34;
266 int vq, len = 0;
267 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
268 len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
269 }
270 return len;
271 }
272 case 51:
273 {
274 /*
275 * We report in Vector Granules (VG) which is 64bit in a Z reg
276 * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
277 */
278 int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
279 return gdb_get_reg32(buf, vq * 2);
280 }
281 default:
282 /* gdbstub asked for something out of our range */
283 qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
284 break;
285 }
286
287 return 0;
288 }
289
290 static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
291 {
292 ARMCPU *cpu = env_archcpu(env);
293
294 /* The first 32 registers are the zregs */
295 switch (reg) {
297 case 0 ... 31:
298 {
299 int vq, len = 0;
300 uint64_t *p = (uint64_t *) buf;
301 for (vq = 0; vq < cpu->sve_max_vq; vq++) {
302 env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
303 env->vfp.zregs[reg].d[vq * 2] = *p++;
304 len += 16;
305 }
306 return len;
307 }
308 case 32:
309 vfp_set_fpsr(env, *(uint32_t *)buf);
310 return 4;
311 case 33:
312 vfp_set_fpcr(env, *(uint32_t *)buf);
313 return 4;
314 case 34 ... 50:
315 {
316 int preg = reg - 34;
317 int vq, len = 0;
318 uint64_t *p = (uint64_t *) buf;
319 for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
320 env->vfp.pregs[preg].p[vq / 4] = *p++;
321 len += 8;
322 }
323 return len;
324 }
325 case 51:
326 /* cannot set vg via gdbstub */
327 return 0;
328 default:
329 /* gdbstub asked for something out of our range */
330 break;
331 }
332
333 return 0;
334 }
335 #endif /* TARGET_AARCH64 */
336
337 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
338 {
339 /* Return true if the regdef would cause an assertion if you called
340 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
341 * program bug for it not to have the NO_RAW flag).
342 * NB that returning false here doesn't necessarily mean that calling
343 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
344 * read/write access functions which are safe for raw use" from "has
345 * read/write access functions which have side effects but has forgotten
346 * to provide raw access functions".
347 * The tests here line up with the conditions in read/write_raw_cp_reg()
348 * and assertions in raw_read()/raw_write().
349 */
350 if ((ri->type & ARM_CP_CONST) ||
351 ri->fieldoffset ||
352 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
353 return false;
354 }
355 return true;
356 }
357
358 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
359 {
360 /* Write the coprocessor state from cpu->env to the (index,value) list. */
361 int i;
362 bool ok = true;
363
364 for (i = 0; i < cpu->cpreg_array_len; i++) {
365 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
366 const ARMCPRegInfo *ri;
367 uint64_t newval;
368
369 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
370 if (!ri) {
371 ok = false;
372 continue;
373 }
374 if (ri->type & ARM_CP_NO_RAW) {
375 continue;
376 }
377
378 newval = read_raw_cp_reg(&cpu->env, ri);
379 if (kvm_sync) {
380 /*
381 * Only sync if the previous list->cpustate sync succeeded.
382 * Rather than tracking the success/failure state for every
383 * item in the list, we just recheck "does the raw write we must
384 * have made in write_list_to_cpustate() read back OK" here.
385 */
386 uint64_t oldval = cpu->cpreg_values[i];
387
388 if (oldval == newval) {
389 continue;
390 }
391
392 write_raw_cp_reg(&cpu->env, ri, oldval);
393 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
394 continue;
395 }
396
397 write_raw_cp_reg(&cpu->env, ri, newval);
398 }
399 cpu->cpreg_values[i] = newval;
400 }
401 return ok;
402 }
403
404 bool write_list_to_cpustate(ARMCPU *cpu)
405 {
406 int i;
407 bool ok = true;
408
409 for (i = 0; i < cpu->cpreg_array_len; i++) {
410 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
411 uint64_t v = cpu->cpreg_values[i];
412 const ARMCPRegInfo *ri;
413
414 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
415 if (!ri) {
416 ok = false;
417 continue;
418 }
419 if (ri->type & ARM_CP_NO_RAW) {
420 continue;
421 }
422 /* Write value and confirm it reads back as written
423 * (to catch read-only registers and partially read-only
424 * registers where the incoming migration value doesn't match)
425 */
426 write_raw_cp_reg(&cpu->env, ri, v);
427 if (read_raw_cp_reg(&cpu->env, ri) != v) {
428 ok = false;
429 }
430 }
431 return ok;
432 }
433
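/*
 * Helpers for init_cpreg_list() below: count_cpreg() sizes the index and
 * value arrays, add_cpreg_to_list() fills in the (KVM-encoded) index for
 * each register, and both skip anything marked ARM_CP_NO_RAW or
 * ARM_CP_ALIAS since those have no raw state to migrate.
 */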
434 static void add_cpreg_to_list(gpointer key, gpointer opaque)
435 {
436 ARMCPU *cpu = opaque;
437 uint64_t regidx;
438 const ARMCPRegInfo *ri;
439
440 regidx = *(uint32_t *)key;
441 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
442
443 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
444 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
445 /* The value array need not be initialized at this point */
446 cpu->cpreg_array_len++;
447 }
448 }
449
450 static void count_cpreg(gpointer key, gpointer opaque)
451 {
452 ARMCPU *cpu = opaque;
453 uint64_t regidx;
454 const ARMCPRegInfo *ri;
455
456 regidx = *(uint32_t *)key;
457 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
458
459 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
460 cpu->cpreg_array_len++;
461 }
462 }
463
464 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
465 {
466 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
467 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
468
469 if (aidx > bidx) {
470 return 1;
471 }
472 if (aidx < bidx) {
473 return -1;
474 }
475 return 0;
476 }
477
478 void init_cpreg_list(ARMCPU *cpu)
479 {
480 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
481 * Note that we require cpreg_tuples[] to be sorted by key ID.
482 */
483 GList *keys;
484 int arraylen;
485
486 keys = g_hash_table_get_keys(cpu->cp_regs);
487 keys = g_list_sort(keys, cpreg_key_compare);
488
489 cpu->cpreg_array_len = 0;
490
491 g_list_foreach(keys, count_cpreg, cpu);
492
493 arraylen = cpu->cpreg_array_len;
494 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
495 cpu->cpreg_values = g_new(uint64_t, arraylen);
496 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
497 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
498 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
499 cpu->cpreg_array_len = 0;
500
501 g_list_foreach(keys, add_cpreg_to_list, cpu);
502
503 assert(cpu->cpreg_array_len == arraylen);
504
505 g_list_free(keys);
506 }
507
508 /*
509 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
510 */
511 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
512 const ARMCPRegInfo *ri,
513 bool isread)
514 {
515 if (!is_a64(env) && arm_current_el(env) == 3 &&
516 arm_is_secure_below_el3(env)) {
517 return CP_ACCESS_TRAP_UNCATEGORIZED;
518 }
519 return CP_ACCESS_OK;
520 }
521
522 /* Some secure-only AArch32 registers trap to EL3 if used from
523 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
524 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
525 * We assume that the .access field is set to PL1_RW.
526 */
527 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
528 const ARMCPRegInfo *ri,
529 bool isread)
530 {
531 if (arm_current_el(env) == 3) {
532 return CP_ACCESS_OK;
533 }
534 if (arm_is_secure_below_el3(env)) {
535 return CP_ACCESS_TRAP_EL3;
536 }
537 /* This will be EL1 NS and EL2 NS, which just UNDEF */
538 return CP_ACCESS_TRAP_UNCATEGORIZED;
539 }
540
541 /* Check for traps to "powerdown debug" registers, which are controlled
542 * by MDCR.TDOSA
543 */
544 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
545 bool isread)
546 {
547 int el = arm_current_el(env);
548 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
549 (env->cp15.mdcr_el2 & MDCR_TDE) ||
550 (arm_hcr_el2_eff(env) & HCR_TGE);
551
552 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
553 return CP_ACCESS_TRAP_EL2;
554 }
555 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
556 return CP_ACCESS_TRAP_EL3;
557 }
558 return CP_ACCESS_OK;
559 }
560
561 /* Check for traps to "debug ROM" registers, which are controlled
562 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
563 */
564 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
565 bool isread)
566 {
567 int el = arm_current_el(env);
568 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
569 (env->cp15.mdcr_el2 & MDCR_TDE) ||
570 (arm_hcr_el2_eff(env) & HCR_TGE);
571
572 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
573 return CP_ACCESS_TRAP_EL2;
574 }
575 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
576 return CP_ACCESS_TRAP_EL3;
577 }
578 return CP_ACCESS_OK;
579 }
580
581 /* Check for traps to general debug registers, which are controlled
582 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
583 */
584 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
585 bool isread)
586 {
587 int el = arm_current_el(env);
588 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
589 (env->cp15.mdcr_el2 & MDCR_TDE) ||
590 (arm_hcr_el2_eff(env) & HCR_TGE);
591
592 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
593 return CP_ACCESS_TRAP_EL2;
594 }
595 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
596 return CP_ACCESS_TRAP_EL3;
597 }
598 return CP_ACCESS_OK;
599 }
600
601 /* Check for traps to performance monitor registers, which are controlled
602 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
603 */
604 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
605 bool isread)
606 {
607 int el = arm_current_el(env);
608
609 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
610 && !arm_is_secure_below_el3(env)) {
611 return CP_ACCESS_TRAP_EL2;
612 }
613 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
614 return CP_ACCESS_TRAP_EL3;
615 }
616 return CP_ACCESS_OK;
617 }
618
619 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
620 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
621 bool isread)
622 {
623 if (arm_current_el(env) == 1) {
624 uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
625 if (arm_hcr_el2_eff(env) & trap) {
626 return CP_ACCESS_TRAP_EL2;
627 }
628 }
629 return CP_ACCESS_OK;
630 }
631
632 /* Check for traps from EL1 due to HCR_EL2.TSW. */
633 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
634 bool isread)
635 {
636 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
637 return CP_ACCESS_TRAP_EL2;
638 }
639 return CP_ACCESS_OK;
640 }
641
642 /* Check for traps from EL1 due to HCR_EL2.TACR. */
643 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
644 bool isread)
645 {
646 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
647 return CP_ACCESS_TRAP_EL2;
648 }
649 return CP_ACCESS_OK;
650 }
651
652 /* Check for traps from EL1 due to HCR_EL2.TTLB. */
653 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
654 bool isread)
655 {
656 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
657 return CP_ACCESS_TRAP_EL2;
658 }
659 return CP_ACCESS_OK;
660 }
661
662 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
663 {
664 ARMCPU *cpu = env_archcpu(env);
665
666 raw_write(env, ri, value);
667 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
668 }
669
670 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
671 {
672 ARMCPU *cpu = env_archcpu(env);
673
674 if (raw_read(env, ri) != value) {
675 /* Unlike real hardware the qemu TLB uses virtual addresses,
676 * not modified virtual addresses, so this causes a TLB flush.
677 */
678 tlb_flush(CPU(cpu));
679 raw_write(env, ri, value);
680 }
681 }
682
683 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
684 uint64_t value)
685 {
686 ARMCPU *cpu = env_archcpu(env);
687
688 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
689 && !extended_addresses_enabled(env)) {
690 /* For VMSA (when not using the LPAE long descriptor page table
691 * format) this register includes the ASID, so do a TLB flush.
692 * For PMSA it is purely a process ID and no action is needed.
693 */
694 tlb_flush(CPU(cpu));
695 }
696 raw_write(env, ri, value);
697 }
698
699 /* IS variants of TLB operations must affect all cores */
700 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
701 uint64_t value)
702 {
703 CPUState *cs = env_cpu(env);
704
705 tlb_flush_all_cpus_synced(cs);
706 }
707
708 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
709 uint64_t value)
710 {
711 CPUState *cs = env_cpu(env);
712
713 tlb_flush_all_cpus_synced(cs);
714 }
715
716 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
717 uint64_t value)
718 {
719 CPUState *cs = env_cpu(env);
720
721 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
722 }
723
724 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
725 uint64_t value)
726 {
727 CPUState *cs = env_cpu(env);
728
729 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
730 }
731
732 /*
733 * Non-IS variants of TLB operations are upgraded to
734 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
735 * force broadcast of these operations.
736 */
737 static bool tlb_force_broadcast(CPUARMState *env)
738 {
739 return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
740 }
741
742 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
743 uint64_t value)
744 {
745 /* Invalidate all (TLBIALL) */
746 CPUState *cs = env_cpu(env);
747
748 if (tlb_force_broadcast(env)) {
749 tlb_flush_all_cpus_synced(cs);
750 } else {
751 tlb_flush(cs);
752 }
753 }
754
755 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
756 uint64_t value)
757 {
758 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
759 CPUState *cs = env_cpu(env);
760
761 value &= TARGET_PAGE_MASK;
762 if (tlb_force_broadcast(env)) {
763 tlb_flush_page_all_cpus_synced(cs, value);
764 } else {
765 tlb_flush_page(cs, value);
766 }
767 }
768
769 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
770 uint64_t value)
771 {
772 /* Invalidate by ASID (TLBIASID) */
773 CPUState *cs = env_cpu(env);
774
775 if (tlb_force_broadcast(env)) {
776 tlb_flush_all_cpus_synced(cs);
777 } else {
778 tlb_flush(cs);
779 }
780 }
781
782 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
783 uint64_t value)
784 {
785 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
786 CPUState *cs = env_cpu(env);
787
788 value &= TARGET_PAGE_MASK;
789 if (tlb_force_broadcast(env)) {
790 tlb_flush_page_all_cpus_synced(cs, value);
791 } else {
792 tlb_flush_page(cs, value);
793 }
794 }
795
796 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
797 uint64_t value)
798 {
799 CPUState *cs = env_cpu(env);
800
801 tlb_flush_by_mmuidx(cs,
802 ARMMMUIdxBit_E10_1 |
803 ARMMMUIdxBit_E10_1_PAN |
804 ARMMMUIdxBit_E10_0);
805 }
806
807 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
808 uint64_t value)
809 {
810 CPUState *cs = env_cpu(env);
811
812 tlb_flush_by_mmuidx_all_cpus_synced(cs,
813 ARMMMUIdxBit_E10_1 |
814 ARMMMUIdxBit_E10_1_PAN |
815 ARMMMUIdxBit_E10_0);
816 }
817
818
819 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
820 uint64_t value)
821 {
822 CPUState *cs = env_cpu(env);
823
824 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
825 }
826
827 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
828 uint64_t value)
829 {
830 CPUState *cs = env_cpu(env);
831
832 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
833 }
834
835 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
836 uint64_t value)
837 {
838 CPUState *cs = env_cpu(env);
839 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
840
841 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
842 }
843
844 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
845 uint64_t value)
846 {
847 CPUState *cs = env_cpu(env);
848 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
849
850 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
851 ARMMMUIdxBit_E2);
852 }
853
854 static const ARMCPRegInfo cp_reginfo[] = {
855 /* Define the secure and non-secure FCSE identifier CP registers
856 * separately because there is no secure bank in V8 (no _EL3). This allows
857 * the secure register to be properly reset and migrated. There is also no
858 * v8 EL1 version of the register so the non-secure instance stands alone.
859 */
860 { .name = "FCSEIDR",
861 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
862 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
863 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
864 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
865 { .name = "FCSEIDR_S",
866 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
867 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
868 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
869 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
870 /* Define the secure and non-secure context identifier CP registers
871 * separately because there is no secure bank in V8 (no _EL3). This allows
872 * the secure register to be properly reset and migrated. In the
873 * non-secure case, the 32-bit register will have reset and migration
874 * disabled during registration as it is handled by the 64-bit instance.
875 */
876 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
877 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
878 .access = PL1_RW, .accessfn = access_tvm_trvm,
879 .secure = ARM_CP_SECSTATE_NS,
880 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
881 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
882 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
883 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
884 .access = PL1_RW, .accessfn = access_tvm_trvm,
885 .secure = ARM_CP_SECSTATE_S,
886 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
887 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
888 REGINFO_SENTINEL
889 };
890
891 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
892 /* NB: Some of these registers exist in v8 but with more precise
893 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
894 */
895 /* MMU Domain access control / MPU write buffer control */
896 { .name = "DACR",
897 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
898 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
899 .writefn = dacr_write, .raw_writefn = raw_write,
900 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
901 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
902 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
903 * For v6 and v5, these mappings are overly broad.
904 */
905 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
906 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
907 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
908 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
909 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
910 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
911 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
912 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
913 /* Cache maintenance ops; some of this space may be overridden later. */
914 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
915 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
916 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
917 REGINFO_SENTINEL
918 };
919
920 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
921 /* Not all pre-v6 cores implemented this WFI, so this is slightly
922 * over-broad.
923 */
924 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
925 .access = PL1_W, .type = ARM_CP_WFI },
926 REGINFO_SENTINEL
927 };
928
929 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
930 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
931 * is UNPREDICTABLE; we choose to NOP as most implementations do).
932 */
933 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
934 .access = PL1_W, .type = ARM_CP_WFI },
935 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
936 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
937 * OMAPCP will override this space.
938 */
939 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
940 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
941 .resetvalue = 0 },
942 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
943 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
944 .resetvalue = 0 },
945 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
946 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
947 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
948 .resetvalue = 0 },
949 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
950 * implementing it as RAZ means the "debug architecture version" bits
951 * will read as a reserved value, which should cause Linux to not try
952 * to use the debug hardware.
953 */
954 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
955 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
956 /* MMU TLB control. Note that the wildcarding means we cover not just
957 * the unified TLB ops but also the dside/iside/inner-shareable variants.
958 */
959 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
960 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
961 .type = ARM_CP_NO_RAW },
962 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
963 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
964 .type = ARM_CP_NO_RAW },
965 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
966 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
967 .type = ARM_CP_NO_RAW },
968 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
969 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
970 .type = ARM_CP_NO_RAW },
971 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
972 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
973 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
974 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
975 REGINFO_SENTINEL
976 };
977
978 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
979 uint64_t value)
980 {
981 uint32_t mask = 0;
982
983 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
984 if (!arm_feature(env, ARM_FEATURE_V8)) {
985 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
986 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
987 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
988 */
989 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
990 /* VFP coprocessor: cp10 & cp11 [23:20] */
991 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
992
993 if (!arm_feature(env, ARM_FEATURE_NEON)) {
994 /* ASEDIS [31] bit is RAO/WI */
995 value |= (1 << 31);
996 }
997
998 /* VFPv3 and upwards with NEON implement 32 double precision
999 * registers (D0-D31).
1000 */
1001 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
1002 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
1003 value |= (1 << 30);
1004 }
1005 }
1006 value &= mask;
1007 }
1008
1009 /*
1010 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1011 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1012 */
1013 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1014 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1015 value &= ~(0xf << 20);
1016 value |= env->cp15.cpacr_el1 & (0xf << 20);
1017 }
1018
1019 env->cp15.cpacr_el1 = value;
1020 }
1021
1022 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1023 {
1024 /*
1025 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1026 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1027 */
1028 uint64_t value = env->cp15.cpacr_el1;
1029
1030 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1031 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1032 value &= ~(0xf << 20);
1033 }
1034 return value;
1035 }
1036
1037
1038 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1039 {
1040 /* Call cpacr_write() so that we reset with the correct RAO bits set
1041 * for our CPU features.
1042 */
1043 cpacr_write(env, ri, 0);
1044 }
1045
1046 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1047 bool isread)
1048 {
1049 if (arm_feature(env, ARM_FEATURE_V8)) {
1050 /* Check if CPACR accesses are to be trapped to EL2 */
1051 if (arm_current_el(env) == 1 &&
1052 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
1053 return CP_ACCESS_TRAP_EL2;
1054 /* Check if CPACR accesses are to be trapped to EL3 */
1055 } else if (arm_current_el(env) < 3 &&
1056 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1057 return CP_ACCESS_TRAP_EL3;
1058 }
1059 }
1060
1061 return CP_ACCESS_OK;
1062 }
1063
1064 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1065 bool isread)
1066 {
1067 /* Check if CPTR accesses are set to trap to EL3 */
1068 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1069 return CP_ACCESS_TRAP_EL3;
1070 }
1071
1072 return CP_ACCESS_OK;
1073 }
1074
1075 static const ARMCPRegInfo v6_cp_reginfo[] = {
1076 /* prefetch by MVA in v6, NOP in v7 */
1077 { .name = "MVA_prefetch",
1078 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
1079 .access = PL1_W, .type = ARM_CP_NOP },
1080 /* We need to break the TB after ISB to execute self-modifying code
1081 * correctly and also to take any pending interrupts immediately.
1082 * So we use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
1083 */
1084 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
1085 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
1086 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
1087 .access = PL0_W, .type = ARM_CP_NOP },
1088 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
1089 .access = PL0_W, .type = ARM_CP_NOP },
1090 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
1091 .access = PL1_RW, .accessfn = access_tvm_trvm,
1092 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1093 offsetof(CPUARMState, cp15.ifar_ns) },
1094 .resetvalue = 0, },
1095 /* Watchpoint Fault Address Register : should actually only be present
1096 * for 1136, 1176, 11MPCore.
1097 */
1098 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1099 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1100 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1101 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1102 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1103 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1104 REGINFO_SENTINEL
1105 };
1106
1107 /* Definitions for the PMU registers */
1108 #define PMCRN_MASK 0xf800
1109 #define PMCRN_SHIFT 11
1110 #define PMCRLC 0x40
1111 #define PMCRDP 0x20
1112 #define PMCRX 0x10
1113 #define PMCRD 0x8
1114 #define PMCRC 0x4
1115 #define PMCRP 0x2
1116 #define PMCRE 0x1
1117 /*
1118 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
1119 * which can be written as 1 to trigger behaviour but which stay RAZ).
1120 */
1121 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1122
1123 #define PMXEVTYPER_P 0x80000000
1124 #define PMXEVTYPER_U 0x40000000
1125 #define PMXEVTYPER_NSK 0x20000000
1126 #define PMXEVTYPER_NSU 0x10000000
1127 #define PMXEVTYPER_NSH 0x08000000
1128 #define PMXEVTYPER_M 0x04000000
1129 #define PMXEVTYPER_MT 0x02000000
1130 #define PMXEVTYPER_EVTCOUNT 0x0000ffff
1131 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1132 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1133 PMXEVTYPER_M | PMXEVTYPER_MT | \
1134 PMXEVTYPER_EVTCOUNT)
1135
1136 #define PMCCFILTR 0xf8000000
1137 #define PMCCFILTR_M PMXEVTYPER_M
1138 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1139
1140 static inline uint32_t pmu_num_counters(CPUARMState *env)
1141 {
1142 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1143 }
1144
1145 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
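/*
 * For example, with PMCR.N == 4 event counters this mask is 0x8000000f:
 * bit 31 for the cycle counter plus one bit per implemented event counter.
 */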
1146 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1147 {
1148 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1149 }
1150
1151 typedef struct pm_event {
1152 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1153 /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1154 bool (*supported)(CPUARMState *);
1155 /*
1156 * Retrieve the current count of the underlying event. The programmed
1157 * counters hold a difference from the return value from this function
1158 */
1159 uint64_t (*get_count)(CPUARMState *);
1160 /*
1161 * Return how many nanoseconds it will take (at a minimum) for count events
1162 * to occur. A negative value indicates the counter will never overflow, or
1163 * that the counter has otherwise arranged for the overflow bit to be set
1164 * and the PMU interrupt to be raised on overflow.
1165 */
1166 int64_t (*ns_per_count)(uint64_t);
1167 } pm_event;
1168
1169 static bool event_always_supported(CPUARMState *env)
1170 {
1171 return true;
1172 }
1173
1174 static uint64_t swinc_get_count(CPUARMState *env)
1175 {
1176 /*
1177 * SW_INCR events are written directly to the pmevcntrs by writes to
1178 * PMSWINC, so there is no underlying count maintained by the PMU itself
1179 */
1180 return 0;
1181 }
1182
1183 static int64_t swinc_ns_per(uint64_t ignored)
1184 {
1185 return -1;
1186 }
1187
1188 /*
1189 * Return the underlying cycle count for the PMU cycle counters. In
1190 * usermode, fall back to the host CPU's tick counter instead.
1191 */
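/*
 * With the fixed 1GHz ARM_CPU_FREQ above this works out to one "cycle"
 * per nanosecond of QEMU_CLOCK_VIRTUAL time.
 */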
1192 static uint64_t cycles_get_count(CPUARMState *env)
1193 {
1194 #ifndef CONFIG_USER_ONLY
1195 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1196 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1197 #else
1198 return cpu_get_host_ticks();
1199 #endif
1200 }
1201
1202 #ifndef CONFIG_USER_ONLY
1203 static int64_t cycles_ns_per(uint64_t cycles)
1204 {
1205 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1206 }
1207
1208 static bool instructions_supported(CPUARMState *env)
1209 {
1210 return icount_enabled() == 1; /* Precise instruction counting */
1211 }
1212
1213 static uint64_t instructions_get_count(CPUARMState *env)
1214 {
1215 return (uint64_t)icount_get_raw();
1216 }
1217
1218 static int64_t instructions_ns_per(uint64_t icount)
1219 {
1220 return icount_to_ns((int64_t)icount);
1221 }
1222 #endif
1223
1224 static bool pmu_8_1_events_supported(CPUARMState *env)
1225 {
1226 /* For events which are supported in any v8.1 PMU */
1227 return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
1228 }
1229
1230 static bool pmu_8_4_events_supported(CPUARMState *env)
1231 {
1232 /* For events which are supported in any v8.4 PMU */
1233 return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
1234 }
1235
1236 static uint64_t zero_event_get_count(CPUARMState *env)
1237 {
1238 /* For events which on QEMU never fire, so their count is always zero */
1239 return 0;
1240 }
1241
1242 static int64_t zero_event_ns_per(uint64_t cycles)
1243 {
1244 /* An event which never fires can never overflow */
1245 return -1;
1246 }
1247
1248 static const pm_event pm_events[] = {
1249 { .number = 0x000, /* SW_INCR */
1250 .supported = event_always_supported,
1251 .get_count = swinc_get_count,
1252 .ns_per_count = swinc_ns_per,
1253 },
1254 #ifndef CONFIG_USER_ONLY
1255 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1256 .supported = instructions_supported,
1257 .get_count = instructions_get_count,
1258 .ns_per_count = instructions_ns_per,
1259 },
1260 { .number = 0x011, /* CPU_CYCLES, Cycle */
1261 .supported = event_always_supported,
1262 .get_count = cycles_get_count,
1263 .ns_per_count = cycles_ns_per,
1264 },
1265 #endif
1266 { .number = 0x023, /* STALL_FRONTEND */
1267 .supported = pmu_8_1_events_supported,
1268 .get_count = zero_event_get_count,
1269 .ns_per_count = zero_event_ns_per,
1270 },
1271 { .number = 0x024, /* STALL_BACKEND */
1272 .supported = pmu_8_1_events_supported,
1273 .get_count = zero_event_get_count,
1274 .ns_per_count = zero_event_ns_per,
1275 },
1276 { .number = 0x03c, /* STALL */
1277 .supported = pmu_8_4_events_supported,
1278 .get_count = zero_event_get_count,
1279 .ns_per_count = zero_event_ns_per,
1280 },
1281 };
1282
1283 /*
1284 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1285 * events (i.e. the statistical profiling extension), this implementation
1286 * should first be updated to something sparse instead of the current
1287 * supported_event_map[] array.
1288 */
1289 #define MAX_EVENT_ID 0x3c
1290 #define UNSUPPORTED_EVENT UINT16_MAX
1291 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1292
1293 /*
1294 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1295 * of ARM event numbers to indices in our pm_events array.
1296 *
1297 * Note: Events in the 0x40XX range are not currently supported.
1298 */
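/*
 * For example, event 0x011 (CPU_CYCLES) sets bit 17 of PMCEID0, while
 * event 0x023 (STALL_FRONTEND) sets bit 3 of PMCEID1: bit 5 of the event
 * number selects the PMCEID register, the low five bits the bit position.
 */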
1299 void pmu_init(ARMCPU *cpu)
1300 {
1301 unsigned int i;
1302
1303 /*
1304 * Empty supported_event_map and cpu->pmceid[01] before adding supported
1305 * events to them
1306 */
1307 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1308 supported_event_map[i] = UNSUPPORTED_EVENT;
1309 }
1310 cpu->pmceid0 = 0;
1311 cpu->pmceid1 = 0;
1312
1313 for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1314 const pm_event *cnt = &pm_events[i];
1315 assert(cnt->number <= MAX_EVENT_ID);
1316 /* We do not currently support events in the 0x40xx range */
1317 assert(cnt->number <= 0x3f);
1318
1319 if (cnt->supported(&cpu->env)) {
1320 supported_event_map[cnt->number] = i;
1321 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1322 if (cnt->number & 0x20) {
1323 cpu->pmceid1 |= event_mask;
1324 } else {
1325 cpu->pmceid0 |= event_mask;
1326 }
1327 }
1328 }
1329 }
1330
1331 /*
1332 * Check at runtime whether a PMU event is supported for the current machine
1333 */
1334 static bool event_supported(uint16_t number)
1335 {
1336 if (number > MAX_EVENT_ID) {
1337 return false;
1338 }
1339 return supported_event_map[number] != UNSUPPORTED_EVENT;
1340 }
1341
1342 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1343 bool isread)
1344 {
1345 /* User accessibility of the performance monitor registers is controlled
1346 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1347 * trapping to EL2 or EL3 for other accesses.
1348 */
1349 int el = arm_current_el(env);
1350
1351 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1352 return CP_ACCESS_TRAP;
1353 }
1354 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1355 && !arm_is_secure_below_el3(env)) {
1356 return CP_ACCESS_TRAP_EL2;
1357 }
1358 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1359 return CP_ACCESS_TRAP_EL3;
1360 }
1361
1362 return CP_ACCESS_OK;
1363 }
1364
1365 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1366 const ARMCPRegInfo *ri,
1367 bool isread)
1368 {
1369 /* ER: event counter read trap control */
1370 if (arm_feature(env, ARM_FEATURE_V8)
1371 && arm_current_el(env) == 0
1372 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1373 && isread) {
1374 return CP_ACCESS_OK;
1375 }
1376
1377 return pmreg_access(env, ri, isread);
1378 }
1379
1380 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1381 const ARMCPRegInfo *ri,
1382 bool isread)
1383 {
1384 /* SW: software increment write trap control */
1385 if (arm_feature(env, ARM_FEATURE_V8)
1386 && arm_current_el(env) == 0
1387 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1388 && !isread) {
1389 return CP_ACCESS_OK;
1390 }
1391
1392 return pmreg_access(env, ri, isread);
1393 }
1394
1395 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1396 const ARMCPRegInfo *ri,
1397 bool isread)
1398 {
1399 /* ER: event counter read trap control */
1400 if (arm_feature(env, ARM_FEATURE_V8)
1401 && arm_current_el(env) == 0
1402 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1403 return CP_ACCESS_OK;
1404 }
1405
1406 return pmreg_access(env, ri, isread);
1407 }
1408
1409 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1410 const ARMCPRegInfo *ri,
1411 bool isread)
1412 {
1413 /* CR: cycle counter read trap control */
1414 if (arm_feature(env, ARM_FEATURE_V8)
1415 && arm_current_el(env) == 0
1416 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1417 && isread) {
1418 return CP_ACCESS_OK;
1419 }
1420
1421 return pmreg_access(env, ri, isread);
1422 }
1423
1424 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1425 * the current EL, security state, and register configuration.
1426 */
1427 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1428 {
1429 uint64_t filter;
1430 bool e, p, u, nsk, nsu, nsh, m;
1431 bool enabled, prohibited, filtered;
1432 bool secure = arm_is_secure(env);
1433 int el = arm_current_el(env);
1434 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1435
1436 if (!arm_feature(env, ARM_FEATURE_PMU)) {
1437 return false;
1438 }
1439
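/*
 * Event counters below MDCR_EL2.HPMN (and the cycle counter, index 31)
 * are enabled by PMCR.E; the remaining counters are enabled by
 * MDCR_EL2.HPME when EL2 is implemented.
 */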
1440 if (!arm_feature(env, ARM_FEATURE_EL2) ||
1441 (counter < hpmn || counter == 31)) {
1442 e = env->cp15.c9_pmcr & PMCRE;
1443 } else {
1444 e = env->cp15.mdcr_el2 & MDCR_HPME;
1445 }
1446 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1447
1448 if (!secure) {
1449 if (el == 2 && (counter < hpmn || counter == 31)) {
1450 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1451 } else {
1452 prohibited = false;
1453 }
1454 } else {
1455 prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1456 !(env->cp15.mdcr_el3 & MDCR_SPME);
1457 }
1458
1459 if (prohibited && counter == 31) {
1460 prohibited = env->cp15.c9_pmcr & PMCRDP;
1461 }
1462
1463 if (counter == 31) {
1464 filter = env->cp15.pmccfiltr_el0;
1465 } else {
1466 filter = env->cp15.c14_pmevtyper[counter];
1467 }
1468
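/*
 * Apply the filter bits from PMEVTYPER/PMCCFILTR: P and U suppress
 * counting at EL1 and EL0 respectively, NSK and NSU invert that choice
 * for the Non-secure versions of those ELs, NSH enables counting at EL2,
 * and at EL3 the event is filtered out when M and P differ.
 */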
1469 p = filter & PMXEVTYPER_P;
1470 u = filter & PMXEVTYPER_U;
1471 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1472 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1473 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1474 m = arm_el_is_aa64(env, 1) &&
1475 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1476
1477 if (el == 0) {
1478 filtered = secure ? u : u != nsu;
1479 } else if (el == 1) {
1480 filtered = secure ? p : p != nsk;
1481 } else if (el == 2) {
1482 filtered = !nsh;
1483 } else { /* EL3 */
1484 filtered = m != p;
1485 }
1486
1487 if (counter != 31) {
1488 /*
1489 * If not checking PMCCNTR, ensure the counter is set up to an event we
1490 * support
1491 */
1492 uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1493 if (!event_supported(event)) {
1494 return false;
1495 }
1496 }
1497
1498 return enabled && !prohibited && !filtered;
1499 }
1500
1501 static void pmu_update_irq(CPUARMState *env)
1502 {
1503 ARMCPU *cpu = env_archcpu(env);
1504 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1505 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1506 }
1507
1508 /*
1509 * Ensure c15_ccnt is the guest-visible count so that operations such as
1510 * enabling/disabling the counter or filtering, modifying the count itself,
1511 * etc. can be done logically. This is essentially a no-op if the counter is
1512 * not enabled at the time of the call.
1513 */
1514 static void pmccntr_op_start(CPUARMState *env)
1515 {
1516 uint64_t cycles = cycles_get_count(env);
1517
1518 if (pmu_counter_enabled(env, 31)) {
1519 uint64_t eff_cycles = cycles;
1520 if (env->cp15.c9_pmcr & PMCRD) {
1521 /* Increment once every 64 processor clock cycles */
1522 eff_cycles /= 64;
1523 }
1524
1525 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1526
1527 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
1528 1ull << 63 : 1ull << 31;
1529 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1530 env->cp15.c9_pmovsr |= (1 << 31);
1531 pmu_update_irq(env);
1532 }
1533
1534 env->cp15.c15_ccnt = new_pmccntr;
1535 }
1536 env->cp15.c15_ccnt_delta = cycles;
1537 }
1538
1539 /*
1540 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1541 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1542 * pmccntr_op_start.
1543 */
1544 static void pmccntr_op_finish(CPUARMState *env)
1545 {
1546 if (pmu_counter_enabled(env, 31)) {
1547 #ifndef CONFIG_USER_ONLY
1548 /* Calculate when the counter will next overflow */
1549 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1550 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1551 remaining_cycles = (uint32_t)remaining_cycles;
1552 }
1553 int64_t overflow_in = cycles_ns_per(remaining_cycles);
1554
1555 if (overflow_in > 0) {
1556 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1557 overflow_in;
1558 ARMCPU *cpu = env_archcpu(env);
1559 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1560 }
1561 #endif
1562
1563 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1564 if (env->cp15.c9_pmcr & PMCRD) {
1565 /* Increment once every 64 processor clock cycles */
1566 prev_cycles /= 64;
1567 }
1568 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1569 }
1570 }
1571
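/*
 * The event counters use the same delta scheme as PMCCNTR above:
 * pmevcntr_op_start() folds the current underlying event count into the
 * guest-visible c14_pmevcntr[] value (raising an overflow if needed), and
 * pmevcntr_op_finish() recomputes c14_pmevcntr_delta[] so that counting
 * resumes from the guest-visible value.
 */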
1572 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1573 {
1574
1575 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1576 uint64_t count = 0;
1577 if (event_supported(event)) {
1578 uint16_t event_idx = supported_event_map[event];
1579 count = pm_events[event_idx].get_count(env);
1580 }
1581
1582 if (pmu_counter_enabled(env, counter)) {
1583 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1584
1585 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1586 env->cp15.c9_pmovsr |= (1 << counter);
1587 pmu_update_irq(env);
1588 }
1589 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1590 }
1591 env->cp15.c14_pmevcntr_delta[counter] = count;
1592 }
1593
1594 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1595 {
1596 if (pmu_counter_enabled(env, counter)) {
1597 #ifndef CONFIG_USER_ONLY
1598 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1599 uint16_t event_idx = supported_event_map[event];
1600 uint64_t delta = UINT32_MAX -
1601 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1602 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1603
1604 if (overflow_in > 0) {
1605 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1606 overflow_in;
1607 ARMCPU *cpu = env_archcpu(env);
1608 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1609 }
1610 #endif
1611
1612 env->cp15.c14_pmevcntr_delta[counter] -=
1613 env->cp15.c14_pmevcntr[counter];
1614 }
1615 }
1616
1617 void pmu_op_start(CPUARMState *env)
1618 {
1619 unsigned int i;
1620 pmccntr_op_start(env);
1621 for (i = 0; i < pmu_num_counters(env); i++) {
1622 pmevcntr_op_start(env, i);
1623 }
1624 }
1625
1626 void pmu_op_finish(CPUARMState *env)
1627 {
1628 unsigned int i;
1629 pmccntr_op_finish(env);
1630 for (i = 0; i < pmu_num_counters(env); i++) {
1631 pmevcntr_op_finish(env, i);
1632 }
1633 }
1634
1635 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1636 {
1637 pmu_op_start(&cpu->env);
1638 }
1639
1640 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1641 {
1642 pmu_op_finish(&cpu->env);
1643 }
1644
1645 void arm_pmu_timer_cb(void *opaque)
1646 {
1647 ARMCPU *cpu = opaque;
1648
1649 /*
1650 * Update all the counter values based on the current underlying counts,
1651 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1652 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1653 * counter may expire.
1654 */
1655 pmu_op_start(&cpu->env);
1656 pmu_op_finish(&cpu->env);
1657 }
1658
1659 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1660 uint64_t value)
1661 {
1662 pmu_op_start(env);
1663
1664 if (value & PMCRC) {
1665 /* The counter has been reset */
1666 env->cp15.c15_ccnt = 0;
1667 }
1668
1669 if (value & PMCRP) {
1670 unsigned int i;
1671 for (i = 0; i < pmu_num_counters(env); i++) {
1672 env->cp15.c14_pmevcntr[i] = 0;
1673 }
1674 }
1675
1676 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
1677 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
1678
1679 pmu_op_finish(env);
1680 }
1681
1682 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1683 uint64_t value)
1684 {
1685 unsigned int i;
1686 for (i = 0; i < pmu_num_counters(env); i++) {
1687 /* Increment a counter's count iff: */
1688 if ((value & (1 << i)) && /* counter's bit is set */
1689 /* counter is enabled and not filtered */
1690 pmu_counter_enabled(env, i) &&
1691 /* counter is SW_INCR */
1692 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1693 pmevcntr_op_start(env, i);
1694
1695 /*
1696 * Detect if this write causes an overflow since we can't predict
1697 * PMSWINC overflows like we can for other events
1698 */
1699 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1700
1701 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1702 env->cp15.c9_pmovsr |= (1 << i);
1703 pmu_update_irq(env);
1704 }
1705
1706 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1707
1708 pmevcntr_op_finish(env, i);
1709 }
1710 }
1711 }
1712
1713 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1714 {
1715 uint64_t ret;
1716 pmccntr_op_start(env);
1717 ret = env->cp15.c15_ccnt;
1718 pmccntr_op_finish(env);
1719 return ret;
1720 }
1721
1722 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1723 uint64_t value)
1724 {
1725 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1726 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; PMSELR.SEL
1727 * is then checked when PMXEVTYPER and PMXEVCNTR are actually
1728 * accessed.
1729 */
1730 env->cp15.c9_pmselr = value & 0x1f;
1731 }
1732
1733 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1734 uint64_t value)
1735 {
1736 pmccntr_op_start(env);
1737 env->cp15.c15_ccnt = value;
1738 pmccntr_op_finish(env);
1739 }
1740
1741 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1742 uint64_t value)
1743 {
1744 uint64_t cur_val = pmccntr_read(env, NULL);
1745
1746 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1747 }
1748
1749 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1750 uint64_t value)
1751 {
1752 pmccntr_op_start(env);
1753 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1754 pmccntr_op_finish(env);
1755 }
1756
1757 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1758 uint64_t value)
1759 {
1760 pmccntr_op_start(env);
1761 /* M is not accessible from AArch32 */
1762 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1763 (value & PMCCFILTR);
1764 pmccntr_op_finish(env);
1765 }
1766
1767 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1768 {
1769 /* M is not visible in AArch32 */
1770 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1771 }
1772
1773 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1774 uint64_t value)
1775 {
1776 value &= pmu_counter_mask(env);
1777 env->cp15.c9_pmcnten |= value;
1778 }
1779
1780 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1781 uint64_t value)
1782 {
1783 value &= pmu_counter_mask(env);
1784 env->cp15.c9_pmcnten &= ~value;
1785 }
1786
1787 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1788 uint64_t value)
1789 {
1790 value &= pmu_counter_mask(env);
1791 env->cp15.c9_pmovsr &= ~value;
1792 pmu_update_irq(env);
1793 }
1794
1795 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1796 uint64_t value)
1797 {
1798 value &= pmu_counter_mask(env);
1799 env->cp15.c9_pmovsr |= value;
1800 pmu_update_irq(env);
1801 }
1802
1803 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1804 uint64_t value, const uint8_t counter)
1805 {
1806 if (counter == 31) {
1807 pmccfiltr_write(env, ri, value);
1808 } else if (counter < pmu_num_counters(env)) {
1809 pmevcntr_op_start(env, counter);
1810
1811 /*
1812 * If this counter's event type is changing, store the current
1813 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1814 * pmevcntr_op_finish has the correct baseline when it converts back to
1815 * a delta.
1816 */
1817 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1818 PMXEVTYPER_EVTCOUNT;
1819 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1820 if (old_event != new_event) {
1821 uint64_t count = 0;
1822 if (event_supported(new_event)) {
1823 uint16_t event_idx = supported_event_map[new_event];
1824 count = pm_events[event_idx].get_count(env);
1825 }
1826 env->cp15.c14_pmevcntr_delta[counter] = count;
1827 }
1828
1829 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1830 pmevcntr_op_finish(env, counter);
1831 }
1832 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1833 * PMSELR value is equal to or greater than the number of implemented
1834 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1835 */
1836 }
1837
1838 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1839 const uint8_t counter)
1840 {
1841 if (counter == 31) {
1842 return env->cp15.pmccfiltr_el0;
1843 } else if (counter < pmu_num_counters(env)) {
1844 return env->cp15.c14_pmevtyper[counter];
1845 } else {
1846 /*
1847 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1848 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1849 */
1850 return 0;
1851 }
1852 }
1853
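/*
 * In the per-counter PMEVTYPER<n>/PMEVCNTR<n> encodings the counter number
 * n is split across the encoding: CRm[1:0] holds n[4:3] and opc2[2:0] holds
 * n[2:0], hence the ((crm & 3) << 3) | (opc2 & 7) reconstruction used by
 * the writefn/readfn helpers below.
 */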
1854 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1855 uint64_t value)
1856 {
1857 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1858 pmevtyper_write(env, ri, value, counter);
1859 }
1860
1861 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1862 uint64_t value)
1863 {
1864 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1865 env->cp15.c14_pmevtyper[counter] = value;
1866
1867 /*
1868 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1869 * pmu_op_finish calls when loading saved state for a migration. Because
1870 * we're potentially updating the type of event here, the value written to
1871  * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1872 * different counter type. Therefore, we need to set this value to the
1873 * current count for the counter type we're writing so that pmu_op_finish
1874 * has the correct count for its calculation.
1875 */
1876 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1877 if (event_supported(event)) {
1878 uint16_t event_idx = supported_event_map[event];
1879 env->cp15.c14_pmevcntr_delta[counter] =
1880 pm_events[event_idx].get_count(env);
1881 }
1882 }
1883
1884 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1885 {
1886 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1887 return pmevtyper_read(env, ri, counter);
1888 }
1889
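/*
 * PMXEVTYPER and PMXEVCNTR are indirect views: they operate on whichever
 * counter PMSELR.SEL currently names. For example, after writing 2 to
 * PMSELR, an access to PMXEVTYPER reads or writes PMEVTYPER2; SEL == 31
 * selects the cycle counter filter register, PMCCFILTR.
 */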
1890 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1891 uint64_t value)
1892 {
1893 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1894 }
1895
1896 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1897 {
1898 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1899 }
1900
1901 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1902 uint64_t value, uint8_t counter)
1903 {
1904 if (counter < pmu_num_counters(env)) {
1905 pmevcntr_op_start(env, counter);
1906 env->cp15.c14_pmevcntr[counter] = value;
1907 pmevcntr_op_finish(env, counter);
1908 }
1909 /*
1910 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1911 * are CONSTRAINED UNPREDICTABLE.
1912 */
1913 }
1914
1915 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1916 uint8_t counter)
1917 {
1918 if (counter < pmu_num_counters(env)) {
1919 uint64_t ret;
1920 pmevcntr_op_start(env, counter);
1921 ret = env->cp15.c14_pmevcntr[counter];
1922 pmevcntr_op_finish(env, counter);
1923 return ret;
1924 } else {
1925 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1926 * are CONSTRAINED UNPREDICTABLE. */
1927 return 0;
1928 }
1929 }
1930
1931 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1932 uint64_t value)
1933 {
1934 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1935 pmevcntr_write(env, ri, value, counter);
1936 }
1937
1938 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1939 {
1940 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1941 return pmevcntr_read(env, ri, counter);
1942 }
1943
1944 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1945 uint64_t value)
1946 {
1947 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1948 assert(counter < pmu_num_counters(env));
1949 env->cp15.c14_pmevcntr[counter] = value;
1950 pmevcntr_write(env, ri, value, counter);
1951 }
1952
1953 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1954 {
1955 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1956 assert(counter < pmu_num_counters(env));
1957 return env->cp15.c14_pmevcntr[counter];
1958 }
1959
1960 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1961 uint64_t value)
1962 {
1963 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1964 }
1965
1966 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1967 {
1968 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1969 }
1970
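/*
 * In v8, PMUSERENR has four defined bits: EN (bit 0) as in v7, plus SW, CR
 * and ER (bits 1-3), which gate EL0 access to PMSWINC, PMCCNTR and the
 * event counters respectively; hence the wider mask on the v8 path below.
 */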
1971 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1972 uint64_t value)
1973 {
1974 if (arm_feature(env, ARM_FEATURE_V8)) {
1975 env->cp15.c9_pmuserenr = value & 0xf;
1976 } else {
1977 env->cp15.c9_pmuserenr = value & 1;
1978 }
1979 }
1980
1981 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1982 uint64_t value)
1983 {
1984 /* Only bits for implemented counters (including the C bit) can be set */
1985 value &= pmu_counter_mask(env);
1986 env->cp15.c9_pminten |= value;
1987 pmu_update_irq(env);
1988 }
1989
1990 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1991 uint64_t value)
1992 {
1993 value &= pmu_counter_mask(env);
1994 env->cp15.c9_pminten &= ~value;
1995 pmu_update_irq(env);
1996 }
1997
1998 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1999 uint64_t value)
2000 {
2001 /* Note that even though the AArch64 view of this register has bits
2002 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
2003 * architectural requirements for bits which are RES0 only in some
2004 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
2005 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
2006 */
2007 raw_write(env, ri, value & ~0x1FULL);
2008 }
2009
2010 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2011 {
2012 /* Begin with base v8.0 state. */
2013 uint32_t valid_mask = 0x3fff;
2014 ARMCPU *cpu = env_archcpu(env);
2015
2016 if (ri->state == ARM_CP_STATE_AA64) {
2017 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
2018 valid_mask &= ~SCR_NET;
2019
2020 if (cpu_isar_feature(aa64_lor, cpu)) {
2021 valid_mask |= SCR_TLOR;
2022 }
2023 if (cpu_isar_feature(aa64_pauth, cpu)) {
2024 valid_mask |= SCR_API | SCR_APK;
2025 }
2026 if (cpu_isar_feature(aa64_mte, cpu)) {
2027 valid_mask |= SCR_ATA;
2028 }
2029 } else {
2030 valid_mask &= ~(SCR_RW | SCR_ST);
2031 }
2032
2033 if (!arm_feature(env, ARM_FEATURE_EL2)) {
2034 valid_mask &= ~SCR_HCE;
2035
2036 /* On ARMv7, SMD (or SCD as it is called in v7) is only
2037  * supported if EL2 exists. The bit is UNK/SBZP when
2038  * EL2 is unavailable, so in QEMU ARMv7 we force it to
2039  * zero in that case.
2040  * On ARMv8, this bit is always available.
2041  */
2042 if (arm_feature(env, ARM_FEATURE_V7) &&
2043 !arm_feature(env, ARM_FEATURE_V8)) {
2044 valid_mask &= ~SCR_SMD;
2045 }
2046 }
2047
2048 /* Clear all-context RES0 bits. */
2049 value &= valid_mask;
2050 raw_write(env, ri, value);
2051 }
2052
2053 static CPAccessResult access_aa64_tid2(CPUARMState *env,
2054 const ARMCPRegInfo *ri,
2055 bool isread)
2056 {
2057 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
2058 return CP_ACCESS_TRAP_EL2;
2059 }
2060
2061 return CP_ACCESS_OK;
2062 }
2063
2064 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2065 {
2066 ARMCPU *cpu = env_archcpu(env);
2067
2068 /* Read CSSELR from the register bank (Secure or Non-secure) that
2069  * corresponds to the security state of this CCSIDR access.
2070  */
2071 uint32_t index = A32_BANKED_REG_GET(env, csselr,
2072 ri->secure & ARM_CP_SECSTATE_S);
2073
2074 return cpu->ccsidr[index];
2075 }
2076
2077 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2078 uint64_t value)
2079 {
2080 raw_write(env, ri, value & 0xf);
2081 }
2082
2083 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2084 {
2085 CPUState *cs = env_cpu(env);
2086 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
2087 uint64_t ret = 0;
2088 bool allow_virt = (arm_current_el(env) == 1 &&
2089 (!arm_is_secure_below_el3(env) ||
2090 (env->cp15.scr_el3 & SCR_EEL2)));
2091
2092 if (allow_virt && (hcr_el2 & HCR_IMO)) {
2093 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2094 ret |= CPSR_I;
2095 }
2096 } else {
2097 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2098 ret |= CPSR_I;
2099 }
2100 }
2101
2102 if (allow_virt && (hcr_el2 & HCR_FMO)) {
2103 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2104 ret |= CPSR_F;
2105 }
2106 } else {
2107 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2108 ret |= CPSR_F;
2109 }
2110 }
2111
2112 /* External aborts are not possible in QEMU so the A bit is always clear */
2113 return ret;
2114 }
2115
2116 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2117 bool isread)
2118 {
2119 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2120 return CP_ACCESS_TRAP_EL2;
2121 }
2122
2123 return CP_ACCESS_OK;
2124 }
2125
2126 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2127 bool isread)
2128 {
2129 if (arm_feature(env, ARM_FEATURE_V8)) {
2130 return access_aa64_tid1(env, ri, isread);
2131 }
2132
2133 return CP_ACCESS_OK;
2134 }
2135
2136 static const ARMCPRegInfo v7_cp_reginfo[] = {
2137 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2138 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2139 .access = PL1_W, .type = ARM_CP_NOP },
2140 /* Performance monitors are implementation defined in v7,
2141 * but with an ARM recommended set of registers, which we
2142 * follow.
2143 *
2144 * Performance registers fall into three categories:
2145 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2146 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2147 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2148 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2149 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2150 */
2151 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2152 .access = PL0_RW, .type = ARM_CP_ALIAS,
2153 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2154 .writefn = pmcntenset_write,
2155 .accessfn = pmreg_access,
2156 .raw_writefn = raw_write },
2157 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2158 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2159 .access = PL0_RW, .accessfn = pmreg_access,
2160 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2161 .writefn = pmcntenset_write, .raw_writefn = raw_write },
2162 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2163 .access = PL0_RW,
2164 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2165 .accessfn = pmreg_access,
2166 .writefn = pmcntenclr_write,
2167 .type = ARM_CP_ALIAS },
2168 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2169 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2170 .access = PL0_RW, .accessfn = pmreg_access,
2171 .type = ARM_CP_ALIAS,
2172 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2173 .writefn = pmcntenclr_write },
2174 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2175 .access = PL0_RW, .type = ARM_CP_IO,
2176 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2177 .accessfn = pmreg_access,
2178 .writefn = pmovsr_write,
2179 .raw_writefn = raw_write },
2180 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2181 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2182 .access = PL0_RW, .accessfn = pmreg_access,
2183 .type = ARM_CP_ALIAS | ARM_CP_IO,
2184 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2185 .writefn = pmovsr_write,
2186 .raw_writefn = raw_write },
2187 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2188 .access = PL0_W, .accessfn = pmreg_access_swinc,
2189 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2190 .writefn = pmswinc_write },
2191 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2192 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2193 .access = PL0_W, .accessfn = pmreg_access_swinc,
2194 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2195 .writefn = pmswinc_write },
2196 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2197 .access = PL0_RW, .type = ARM_CP_ALIAS,
2198 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2199 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2200 .raw_writefn = raw_write},
2201 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2202 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2203 .access = PL0_RW, .accessfn = pmreg_access_selr,
2204 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2205 .writefn = pmselr_write, .raw_writefn = raw_write, },
2206 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2207 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2208 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2209 .accessfn = pmreg_access_ccntr },
2210 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2211 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2212 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2213 .type = ARM_CP_IO,
2214 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2215 .readfn = pmccntr_read, .writefn = pmccntr_write,
2216 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2217 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2218 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2219 .access = PL0_RW, .accessfn = pmreg_access,
2220 .type = ARM_CP_ALIAS | ARM_CP_IO,
2221 .resetvalue = 0, },
2222 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2223 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2224 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2225 .access = PL0_RW, .accessfn = pmreg_access,
2226 .type = ARM_CP_IO,
2227 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2228 .resetvalue = 0, },
2229 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2230 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2231 .accessfn = pmreg_access,
2232 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2233 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2234 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2235 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2236 .accessfn = pmreg_access,
2237 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2238 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2239 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2240 .accessfn = pmreg_access_xevcntr,
2241 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2242 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2243 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2244 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2245 .accessfn = pmreg_access_xevcntr,
2246 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2247 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2248 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2249 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2250 .resetvalue = 0,
2251 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2252 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2253 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2254 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2255 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2256 .resetvalue = 0,
2257 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2258 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2259 .access = PL1_RW, .accessfn = access_tpm,
2260 .type = ARM_CP_ALIAS | ARM_CP_IO,
2261 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2262 .resetvalue = 0,
2263 .writefn = pmintenset_write, .raw_writefn = raw_write },
2264 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2265 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2266 .access = PL1_RW, .accessfn = access_tpm,
2267 .type = ARM_CP_IO,
2268 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2269 .writefn = pmintenset_write, .raw_writefn = raw_write,
2270 .resetvalue = 0x0 },
2271 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2272 .access = PL1_RW, .accessfn = access_tpm,
2273 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2274 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2275 .writefn = pmintenclr_write, },
2276 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2277 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2278 .access = PL1_RW, .accessfn = access_tpm,
2279 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2280 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2281 .writefn = pmintenclr_write },
2282 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2283 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2284 .access = PL1_R,
2285 .accessfn = access_aa64_tid2,
2286 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2287 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2288 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2289 .access = PL1_RW,
2290 .accessfn = access_aa64_tid2,
2291 .writefn = csselr_write, .resetvalue = 0,
2292 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2293 offsetof(CPUARMState, cp15.csselr_ns) } },
2294 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2295  * just RAZ for all cores.
2296  */
2297 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2298 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2299 .access = PL1_R, .type = ARM_CP_CONST,
2300 .accessfn = access_aa64_tid1,
2301 .resetvalue = 0 },
2302 /* Auxiliary fault status registers: these also are IMPDEF, and we
2303 * choose to RAZ/WI for all cores.
2304 */
2305 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2306 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2307 .access = PL1_RW, .accessfn = access_tvm_trvm,
2308 .type = ARM_CP_CONST, .resetvalue = 0 },
2309 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2310 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2311 .access = PL1_RW, .accessfn = access_tvm_trvm,
2312 .type = ARM_CP_CONST, .resetvalue = 0 },
2313 /* MAIR can just read-as-written because we don't implement caches
2314 * and so don't need to care about memory attributes.
2315 */
2316 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2317 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2318 .access = PL1_RW, .accessfn = access_tvm_trvm,
2319 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2320 .resetvalue = 0 },
2321 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2322 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2323 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2324 .resetvalue = 0 },
2325 /* For non-long-descriptor page tables these are PRRR and NMRR;
2326  * regardless, they still act as reads-as-written for QEMU.
2327  */
2328 /* MAIR0/1 are defined separately from their 64-bit counterpart which
2329 * allows them to assign the correct fieldoffset based on the endianness
2330 * handled in the field definitions.
2331 */
2332 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2333 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2334 .access = PL1_RW, .accessfn = access_tvm_trvm,
2335 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2336 offsetof(CPUARMState, cp15.mair0_ns) },
2337 .resetfn = arm_cp_reset_ignore },
2338 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2339 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2340 .access = PL1_RW, .accessfn = access_tvm_trvm,
2341 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2342 offsetof(CPUARMState, cp15.mair1_ns) },
2343 .resetfn = arm_cp_reset_ignore },
2344 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2345 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2346 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2347 /* 32 bit ITLB invalidates */
2348 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2349 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2350 .writefn = tlbiall_write },
2351 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2352 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2353 .writefn = tlbimva_write },
2354 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2355 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2356 .writefn = tlbiasid_write },
2357 /* 32 bit DTLB invalidates */
2358 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2359 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2360 .writefn = tlbiall_write },
2361 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2362 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2363 .writefn = tlbimva_write },
2364 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2365 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2366 .writefn = tlbiasid_write },
2367 /* 32 bit TLB invalidates */
2368 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2369 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2370 .writefn = tlbiall_write },
2371 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2372 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2373 .writefn = tlbimva_write },
2374 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2375 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2376 .writefn = tlbiasid_write },
2377 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2378 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2379 .writefn = tlbimvaa_write },
2380 REGINFO_SENTINEL
2381 };
2382
2383 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2384 /* 32 bit TLB invalidates, Inner Shareable */
2385 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2386 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2387 .writefn = tlbiall_is_write },
2388 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2389 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2390 .writefn = tlbimva_is_write },
2391 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2392 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2393 .writefn = tlbiasid_is_write },
2394 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2395 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2396 .writefn = tlbimvaa_is_write },
2397 REGINFO_SENTINEL
2398 };
2399
2400 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2401 /* PMOVSSET is not implemented in v7 before v7ve */
2402 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2403 .access = PL0_RW, .accessfn = pmreg_access,
2404 .type = ARM_CP_ALIAS | ARM_CP_IO,
2405 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2406 .writefn = pmovsset_write,
2407 .raw_writefn = raw_write },
2408 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2409 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2410 .access = PL0_RW, .accessfn = pmreg_access,
2411 .type = ARM_CP_ALIAS | ARM_CP_IO,
2412 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2413 .writefn = pmovsset_write,
2414 .raw_writefn = raw_write },
2415 REGINFO_SENTINEL
2416 };
2417
2418 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2419 uint64_t value)
2420 {
2421 value &= 1;
2422 env->teecr = value;
2423 }
2424
2425 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2426 bool isread)
2427 {
2428 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2429 return CP_ACCESS_TRAP;
2430 }
2431 return CP_ACCESS_OK;
2432 }
2433
2434 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2435 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2436 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2437 .resetvalue = 0,
2438 .writefn = teecr_write },
2439 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2440 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2441 .accessfn = teehbr_access, .resetvalue = 0 },
2442 REGINFO_SENTINEL
2443 };
2444
2445 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2446 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2447 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2448 .access = PL0_RW,
2449 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2450 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2451 .access = PL0_RW,
2452 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2453 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2454 .resetfn = arm_cp_reset_ignore },
2455 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2456 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2457 .access = PL0_R|PL1_W,
2458 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2459 .resetvalue = 0},
2460 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2461 .access = PL0_R|PL1_W,
2462 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2463 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2464 .resetfn = arm_cp_reset_ignore },
2465 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2466 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2467 .access = PL1_RW,
2468 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2469 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2470 .access = PL1_RW,
2471 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2472 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2473 .resetvalue = 0 },
2474 REGINFO_SENTINEL
2475 };
2476
2477 #ifndef CONFIG_USER_ONLY
2478
2479 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2480 bool isread)
2481 {
2482 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2483 * Writable only at the highest implemented exception level.
2484 */
2485 int el = arm_current_el(env);
2486 uint64_t hcr;
2487 uint32_t cntkctl;
2488
2489 switch (el) {
2490 case 0:
2491 hcr = arm_hcr_el2_eff(env);
2492 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2493 cntkctl = env->cp15.cnthctl_el2;
2494 } else {
2495 cntkctl = env->cp15.c14_cntkctl;
2496 }
2497 if (!extract32(cntkctl, 0, 2)) {
2498 return CP_ACCESS_TRAP;
2499 }
2500 break;
2501 case 1:
2502 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2503 arm_is_secure_below_el3(env)) {
2504 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2505 return CP_ACCESS_TRAP_UNCATEGORIZED;
2506 }
2507 break;
2508 case 2:
2509 case 3:
2510 break;
2511 }
2512
2513 if (!isread && el < arm_highest_el(env)) {
2514 return CP_ACCESS_TRAP_UNCATEGORIZED;
2515 }
2516
2517 return CP_ACCESS_OK;
2518 }
2519
2520 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2521 bool isread)
2522 {
2523 unsigned int cur_el = arm_current_el(env);
2524 bool secure = arm_is_secure(env);
2525 uint64_t hcr = arm_hcr_el2_eff(env);
2526
2527 switch (cur_el) {
2528 case 0:
2529 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2530 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2531 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2532 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2533 }
2534
2535 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2536 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2537 return CP_ACCESS_TRAP;
2538 }
2539
2540 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2541 if (hcr & HCR_E2H) {
2542 if (timeridx == GTIMER_PHYS &&
2543 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2544 return CP_ACCESS_TRAP_EL2;
2545 }
2546 } else {
2547 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2548 if (arm_feature(env, ARM_FEATURE_EL2) &&
2549 timeridx == GTIMER_PHYS && !secure &&
2550 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2551 return CP_ACCESS_TRAP_EL2;
2552 }
2553 }
2554 break;
2555
2556 case 1:
2557 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2558 if (arm_feature(env, ARM_FEATURE_EL2) &&
2559 timeridx == GTIMER_PHYS && !secure &&
2560 (hcr & HCR_E2H
2561 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2562 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2563 return CP_ACCESS_TRAP_EL2;
2564 }
2565 break;
2566 }
2567 return CP_ACCESS_OK;
2568 }
2569
2570 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2571 bool isread)
2572 {
2573 unsigned int cur_el = arm_current_el(env);
2574 bool secure = arm_is_secure(env);
2575 uint64_t hcr = arm_hcr_el2_eff(env);
2576
2577 switch (cur_el) {
2578 case 0:
2579 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2580 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2581 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2582 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2583 }
2584
2585 /*
2586 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2587 * EL0 if EL0[PV]TEN is zero.
2588 */
2589 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2590 return CP_ACCESS_TRAP;
2591 }
2592 /* fall through */
2593
2594 case 1:
2595 if (arm_feature(env, ARM_FEATURE_EL2) &&
2596 timeridx == GTIMER_PHYS && !secure) {
2597 if (hcr & HCR_E2H) {
2598 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2599 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2600 return CP_ACCESS_TRAP_EL2;
2601 }
2602 } else {
2603 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2604 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2605 return CP_ACCESS_TRAP_EL2;
2606 }
2607 }
2608 }
2609 break;
2610 }
2611 return CP_ACCESS_OK;
2612 }
2613
2614 static CPAccessResult gt_pct_access(CPUARMState *env,
2615 const ARMCPRegInfo *ri,
2616 bool isread)
2617 {
2618 return gt_counter_access(env, GTIMER_PHYS, isread);
2619 }
2620
2621 static CPAccessResult gt_vct_access(CPUARMState *env,
2622 const ARMCPRegInfo *ri,
2623 bool isread)
2624 {
2625 return gt_counter_access(env, GTIMER_VIRT, isread);
2626 }
2627
2628 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2629 bool isread)
2630 {
2631 return gt_timer_access(env, GTIMER_PHYS, isread);
2632 }
2633
2634 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2635 bool isread)
2636 {
2637 return gt_timer_access(env, GTIMER_VIRT, isread);
2638 }
2639
2640 static CPAccessResult gt_stimer_access(CPUARMState *env,
2641 const ARMCPRegInfo *ri,
2642 bool isread)
2643 {
2644 /* The AArch64 register view of the secure physical timer is
2645 * always accessible from EL3, and configurably accessible from
2646 * Secure EL1.
2647 */
2648 switch (arm_current_el(env)) {
2649 case 1:
2650 if (!arm_is_secure(env)) {
2651 return CP_ACCESS_TRAP;
2652 }
2653 if (!(env->cp15.scr_el3 & SCR_ST)) {
2654 return CP_ACCESS_TRAP_EL3;
2655 }
2656 return CP_ACCESS_OK;
2657 case 0:
2658 case 2:
2659 return CP_ACCESS_TRAP;
2660 case 3:
2661 return CP_ACCESS_OK;
2662 default:
2663 g_assert_not_reached();
2664 }
2665 }
2666
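/*
 * The generic timer count is derived from QEMU's virtual clock: the current
 * time in nanoseconds divided by the tick period that gt_cntfrq_period_ns()
 * derives from the CPU's counter frequency. For example, a 62.5MHz counter
 * frequency corresponds to a 16ns period, so every 16ns of virtual time
 * advances the count by one.
 */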
2667 static uint64_t gt_get_countervalue(CPUARMState *env)
2668 {
2669 ARMCPU *cpu = env_archcpu(env);
2670
2671 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2672 }
2673
2674 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2675 {
2676 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2677
2678 if (gt->ctl & 1) {
2679 /* Timer enabled: calculate and set current ISTATUS, irq, and
2680 * reset timer to when ISTATUS next has to change
2681 */
2682 uint64_t offset = timeridx == GTIMER_VIRT ?
2683 cpu->env.cp15.cntvoff_el2 : 0;
2684 uint64_t count = gt_get_countervalue(&cpu->env);
2685 /* Note that this must be unsigned 64 bit arithmetic: */
2686 int istatus = count - offset >= gt->cval;
2687 uint64_t nexttick;
2688 int irqstate;
2689
2690 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2691
2692 irqstate = (istatus && !(gt->ctl & 2));
2693 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2694
2695 if (istatus) {
2696 /* Next transition is when count rolls back over to zero */
2697 nexttick = UINT64_MAX;
2698 } else {
2699 /* Next transition is when we hit cval */
2700 nexttick = gt->cval + offset;
2701 }
2702 /* Note that the desired next expiry time might be beyond the
2703 * signed-64-bit range of a QEMUTimer -- in this case we just
2704 * set the timer for as far in the future as possible. When the
2705 * timer expires we will reset the timer for any remaining period.
2706 */
2707 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2708 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2709 } else {
2710 timer_mod(cpu->gt_timer[timeridx], nexttick);
2711 }
2712 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2713 } else {
2714 /* Timer disabled: ISTATUS and timer output always clear */
2715 gt->ctl &= ~4;
2716 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2717 timer_del(cpu->gt_timer[timeridx]);
2718 trace_arm_gt_recalc_disabled(timeridx);
2719 }
2720 }
2721
2722 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2723 int timeridx)
2724 {
2725 ARMCPU *cpu = env_archcpu(env);
2726
2727 timer_del(cpu->gt_timer[timeridx]);
2728 }
2729
2730 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2731 {
2732 return gt_get_countervalue(env);
2733 }
2734
2735 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2736 {
2737 uint64_t hcr;
2738
2739 switch (arm_current_el(env)) {
2740 case 2:
2741 hcr = arm_hcr_el2_eff(env);
2742 if (hcr & HCR_E2H) {
2743 return 0;
2744 }
2745 break;
2746 case 0:
2747 hcr = arm_hcr_el2_eff(env);
2748 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2749 return 0;
2750 }
2751 break;
2752 }
2753
2754 return env->cp15.cntvoff_el2;
2755 }
2756
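/*
 * The virtual count is the physical count minus CNTVOFF_EL2, except in the
 * cases gt_virt_cnt_offset() handles above (EL2 with E2H set, or EL0 when
 * E2H and TGE are both set), where the offset is treated as zero.
 */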
2757 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2758 {
2759 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2760 }
2761
2762 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2763 int timeridx,
2764 uint64_t value)
2765 {
2766 trace_arm_gt_cval_write(timeridx, value);
2767 env->cp15.c14_timer[timeridx].cval = value;
2768 gt_recalc_timer(env_archcpu(env), timeridx);
2769 }
2770
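/*
 * TVAL presents the same timer as a signed 32-bit downcounter relative to
 * the current count: reads return CVAL - (count - offset) truncated to 32
 * bits, and writes set CVAL = (count - offset) + sign-extended value, so
 * the timer condition is met once the value counts down through zero.
 */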
2771 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2772 int timeridx)
2773 {
2774 uint64_t offset = 0;
2775
2776 switch (timeridx) {
2777 case GTIMER_VIRT:
2778 case GTIMER_HYPVIRT:
2779 offset = gt_virt_cnt_offset(env);
2780 break;
2781 }
2782
2783 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2784 (gt_get_countervalue(env) - offset));
2785 }
2786
2787 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2788 int timeridx,
2789 uint64_t value)
2790 {
2791 uint64_t offset = 0;
2792
2793 switch (timeridx) {
2794 case GTIMER_VIRT:
2795 case GTIMER_HYPVIRT:
2796 offset = gt_virt_cnt_offset(env);
2797 break;
2798 }
2799
2800 trace_arm_gt_tval_write(timeridx, value);
2801 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2802 sextract64(value, 0, 32);
2803 gt_recalc_timer(env_archcpu(env), timeridx);
2804 }
2805
2806 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2807 int timeridx,
2808 uint64_t value)
2809 {
2810 ARMCPU *cpu = env_archcpu(env);
2811 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2812
2813 trace_arm_gt_ctl_write(timeridx, value);
2814 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2815 if ((oldval ^ value) & 1) {
2816 /* Enable toggled */
2817 gt_recalc_timer(cpu, timeridx);
2818 } else if ((oldval ^ value) & 2) {
2819 /* IMASK toggled: don't need to recalculate,
2820 * just set the interrupt line based on ISTATUS
2821 */
2822 int irqstate = (oldval & 4) && !(value & 2);
2823
2824 trace_arm_gt_imask_toggle(timeridx, irqstate);
2825 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2826 }
2827 }
2828
2829 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2830 {
2831 gt_timer_reset(env, ri, GTIMER_PHYS);
2832 }
2833
2834 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2835 uint64_t value)
2836 {
2837 gt_cval_write(env, ri, GTIMER_PHYS, value);
2838 }
2839
2840 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2841 {
2842 return gt_tval_read(env, ri, GTIMER_PHYS);
2843 }
2844
2845 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2846 uint64_t value)
2847 {
2848 gt_tval_write(env, ri, GTIMER_PHYS, value);
2849 }
2850
2851 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2852 uint64_t value)
2853 {
2854 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2855 }
2856
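/*
 * Under VHE (HCR_EL2.E2H == 1) accesses to the EL0 physical and virtual
 * timer registers from the EL2&0 regime (the ARMMMUIdx_E20_* translation
 * regimes) are redirected to the EL2 timers, so these helpers pick
 * GTIMER_HYP / GTIMER_HYPVIRT there and the normal timers otherwise.
 */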
2857 static int gt_phys_redir_timeridx(CPUARMState *env)
2858 {
2859 switch (arm_mmu_idx(env)) {
2860 case ARMMMUIdx_E20_0:
2861 case ARMMMUIdx_E20_2:
2862 case ARMMMUIdx_E20_2_PAN:
2863 return GTIMER_HYP;
2864 default:
2865 return GTIMER_PHYS;
2866 }
2867 }
2868
2869 static int gt_virt_redir_timeridx(CPUARMState *env)
2870 {
2871 switch (arm_mmu_idx(env)) {
2872 case ARMMMUIdx_E20_0:
2873 case ARMMMUIdx_E20_2:
2874 case ARMMMUIdx_E20_2_PAN:
2875 return GTIMER_HYPVIRT;
2876 default:
2877 return GTIMER_VIRT;
2878 }
2879 }
2880
2881 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2882 const ARMCPRegInfo *ri)
2883 {
2884 int timeridx = gt_phys_redir_timeridx(env);
2885 return env->cp15.c14_timer[timeridx].cval;
2886 }
2887
2888 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2889 uint64_t value)
2890 {
2891 int timeridx = gt_phys_redir_timeridx(env);
2892 gt_cval_write(env, ri, timeridx, value);
2893 }
2894
2895 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2896 const ARMCPRegInfo *ri)
2897 {
2898 int timeridx = gt_phys_redir_timeridx(env);
2899 return gt_tval_read(env, ri, timeridx);
2900 }
2901
2902 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2903 uint64_t value)
2904 {
2905 int timeridx = gt_phys_redir_timeridx(env);
2906 gt_tval_write(env, ri, timeridx, value);
2907 }
2908
2909 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2910 const ARMCPRegInfo *ri)
2911 {
2912 int timeridx = gt_phys_redir_timeridx(env);
2913 return env->cp15.c14_timer[timeridx].ctl;
2914 }
2915
2916 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2917 uint64_t value)
2918 {
2919 int timeridx = gt_phys_redir_timeridx(env);
2920 gt_ctl_write(env, ri, timeridx, value);
2921 }
2922
2923 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2924 {
2925 gt_timer_reset(env, ri, GTIMER_VIRT);
2926 }
2927
2928 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2929 uint64_t value)
2930 {
2931 gt_cval_write(env, ri, GTIMER_VIRT, value);
2932 }
2933
2934 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2935 {
2936 return gt_tval_read(env, ri, GTIMER_VIRT);
2937 }
2938
2939 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2940 uint64_t value)
2941 {
2942 gt_tval_write(env, ri, GTIMER_VIRT, value);
2943 }
2944
2945 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2946 uint64_t value)
2947 {
2948 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2949 }
2950
2951 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2952 uint64_t value)
2953 {
2954 ARMCPU *cpu = env_archcpu(env);
2955
2956 trace_arm_gt_cntvoff_write(value);
2957 raw_write(env, ri, value);
2958 gt_recalc_timer(cpu, GTIMER_VIRT);
2959 }
2960
2961 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2962 const ARMCPRegInfo *ri)
2963 {
2964 int timeridx = gt_virt_redir_timeridx(env);
2965 return env->cp15.c14_timer[timeridx].cval;
2966 }
2967
2968 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2969 uint64_t value)
2970 {
2971 int timeridx = gt_virt_redir_timeridx(env);
2972 gt_cval_write(env, ri, timeridx, value);
2973 }
2974
2975 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2976 const ARMCPRegInfo *ri)
2977 {
2978 int timeridx = gt_virt_redir_timeridx(env);
2979 return gt_tval_read(env, ri, timeridx);
2980 }
2981
2982 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2983 uint64_t value)
2984 {
2985 int timeridx = gt_virt_redir_timeridx(env);
2986 gt_tval_write(env, ri, timeridx, value);
2987 }
2988
2989 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2990 const ARMCPRegInfo *ri)
2991 {
2992 int timeridx = gt_virt_redir_timeridx(env);
2993 return env->cp15.c14_timer[timeridx].ctl;
2994 }
2995
2996 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2997 uint64_t value)
2998 {
2999 int timeridx = gt_virt_redir_timeridx(env);
3000 gt_ctl_write(env, ri, timeridx, value);
3001 }
3002
3003 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3004 {
3005 gt_timer_reset(env, ri, GTIMER_HYP);
3006 }
3007
3008 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3009 uint64_t value)
3010 {
3011 gt_cval_write(env, ri, GTIMER_HYP, value);
3012 }
3013
3014 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3015 {
3016 return gt_tval_read(env, ri, GTIMER_HYP);
3017 }
3018
3019 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3020 uint64_t value)
3021 {
3022 gt_tval_write(env, ri, GTIMER_HYP, value);
3023 }
3024
3025 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3026 uint64_t value)
3027 {
3028 gt_ctl_write(env, ri, GTIMER_HYP, value);
3029 }
3030
3031 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3032 {
3033 gt_timer_reset(env, ri, GTIMER_SEC);
3034 }
3035
3036 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3037 uint64_t value)
3038 {
3039 gt_cval_write(env, ri, GTIMER_SEC, value);
3040 }
3041
3042 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3043 {
3044 return gt_tval_read(env, ri, GTIMER_SEC);
3045 }
3046
3047 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3048 uint64_t value)
3049 {
3050 gt_tval_write(env, ri, GTIMER_SEC, value);
3051 }
3052
3053 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3054 uint64_t value)
3055 {
3056 gt_ctl_write(env, ri, GTIMER_SEC, value);
3057 }
3058
3059 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3060 {
3061 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3062 }
3063
3064 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3065 uint64_t value)
3066 {
3067 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3068 }
3069
3070 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3071 {
3072 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3073 }
3074
3075 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3076 uint64_t value)
3077 {
3078 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3079 }
3080
3081 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3082 uint64_t value)
3083 {
3084 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3085 }
3086
3087 void arm_gt_ptimer_cb(void *opaque)
3088 {
3089 ARMCPU *cpu = opaque;
3090
3091 gt_recalc_timer(cpu, GTIMER_PHYS);
3092 }
3093
3094 void arm_gt_vtimer_cb(void *opaque)
3095 {
3096 ARMCPU *cpu = opaque;
3097
3098 gt_recalc_timer(cpu, GTIMER_VIRT);
3099 }
3100
3101 void arm_gt_htimer_cb(void *opaque)
3102 {
3103 ARMCPU *cpu = opaque;
3104
3105 gt_recalc_timer(cpu, GTIMER_HYP);
3106 }
3107
3108 void arm_gt_stimer_cb(void *opaque)
3109 {
3110 ARMCPU *cpu = opaque;
3111
3112 gt_recalc_timer(cpu, GTIMER_SEC);
3113 }
3114
3115 void arm_gt_hvtimer_cb(void *opaque)
3116 {
3117 ARMCPU *cpu = opaque;
3118
3119 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3120 }
3121
3122 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3123 {
3124 ARMCPU *cpu = env_archcpu(env);
3125
3126 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3127 }
3128
3129 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3130 /* Note that CNTFRQ is purely reads-as-written for the benefit
3131 * of software; writing it doesn't actually change the timer frequency.
3132 * Our reset value matches the fixed frequency we implement the timer at.
3133 */
3134 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3135 .type = ARM_CP_ALIAS,
3136 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3137 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3138 },
3139 { .name = "CNTFRQ_EL0"