/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env));
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
    }
    return 0;
}
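/*
 * With the numbering above, the gdb register group is D0..D{nregs-1}, then
 * (for NEON cores) 16 Q-register aliases, then FPSID, FPSCR and FPEXC. For
 * example, on a core with 32 D registers and NEON, nregs ends up 48, so
 * reg 48 maps to FPSID, 49 to FPSCR and 50 to FPEXC.
 */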

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
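/*
 * A minimal sketch (not part of the original code) of the readback idiom
 * the comment above describes; write_list_to_cpustate() below uses the
 * same pattern inline:
 *
 *     write_raw_cp_reg(env, ri, v);
 *     if (read_raw_cp_reg(env, ri) != v) {
 *         // write was ignored (e.g. ARM_CP_CONST) or partially applied
 *     }
 */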

/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
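/*
 * Returning 0 here reports that no bytes were consumed, so debugger writes
 * to system registers are ignored: the sysreg view exposed via the gdbstub
 * is effectively read-only.
 */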

#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg32(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d\n",
                      __func__, reg);
        break;
    }

    return 0;
}

static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */
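/*
 * Worked example of the layout above, assuming sve_max_vq == 4 (a 512-bit
 * vector implementation) and a ZCR length selecting the full vector: each
 * Z register transfers 64 bytes (four 128-bit quads), each predicate
 * register 8 bytes (one eighth of the vector length), and register 51
 * ("vg") reads as 8 vector granules.
 */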

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
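/*
 * Taken together, these two functions implement the migration model for
 * coprocessor state: write_cpustate_to_list() marshals live cpu->env state
 * into the flat (index,value) arrays on the save side, and
 * write_list_to_cpustate() replays that list into cpu->env on the load
 * side, using the write-then-readback check to detect values the
 * destination cannot accept.
 */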

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
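/*
 * init_cpreg_list() is a classic two-pass "count, allocate, fill" pattern:
 * count_cpreg() sizes the arrays, add_cpreg_to_list() populates them in
 * sorted key order, and the final assert checks that both passes applied
 * the same NO_RAW/ALIAS filter.
 */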

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
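/*
 * All of the MDCR-based access functions above follow the same shape:
 * below EL2, trap to EL2 if the relevant MDCR_EL2 bit (or TDE/TGE for the
 * debug registers) is set and we are non-secure; otherwise, below EL3,
 * trap to EL3 if the relevant MDCR_EL3 bit is set. The EL2 check comes
 * first, so EL2 traps take priority when both would fire.
 */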

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
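/*
 * For example, with HCR_EL2.FB set, a TLBIALL issued at NS EL1 takes the
 * tlb_force_broadcast() path in tlbiall_write() below and is handled
 * exactly like the broadcast TLBIALLIS variant.
 */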

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX 0x10
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRP 0x2
#define PMCRE 0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
#define PMXEVTYPER_NSK 0x20000000
#define PMXEVTYPER_NSU 0x10000000
#define PMXEVTYPER_NSH 0x08000000
#define PMXEVTYPER_M 0x04000000
#define PMXEVTYPER_MT 0x02000000
#define PMXEVTYPER_EVTCOUNT 0x0000ffff
#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                         PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                         PMXEVTYPER_M | PMXEVTYPER_MT | \
                         PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR 0xf8000000
#define PMCCFILTR_M PMXEVTYPER_M
#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
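/*
 * Worked example: with PMCR.N == 4 event counters, pmu_counter_mask()
 * yields (1 << 31) | 0xf == 0x8000000f -- bits 0..3 for the event
 * counters plus bit 31 for the cycle counter (PMCCNTR).
 */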

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In
 * user-mode emulation there is no QEMU_CLOCK_VIRTUAL, so we use the host
 * tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}
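/*
 * Note that the integer division above only works because ARM_CPU_FREQ is
 * exactly NANOSECONDS_PER_SECOND (1 GHz), making the factor 1. If the
 * FIXME on ARM_CPU_FREQ is ever addressed and the frequency becomes
 * configurable, this would need the muldiv64() form used in
 * cycles_get_count() to avoid truncating to zero.
 */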

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
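/*
 * For example, if CPU_CYCLES (0x011) is supported, pmu_init() sets bit 17
 * of pmceid0; events with bit 5 set in their number land in pmceid1, so a
 * supported STALL_FRONTEND (0x023) sets bit 3 of pmceid1.
 */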

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
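/*
 * The PMUSERENR bits consulted above are EN (bit 0, checked in
 * pmreg_access()), SW (bit 1), CR (bit 2) and ER (bit 3): each of the
 * fine-grained bits grants EL0 one specific kind of access even when EN
 * is clear.
 */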

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
            1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
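/*
 * The delta scheme above means the guest-visible count is always
 * "underlying count - c15_ccnt_delta". For example (ignoring the PMCRD
 * divide-by-64), enabling PMCCNTR when the underlying cycle count is C
 * records delta = C, so the guest reads 0; a guest write of V at that
 * point ends up storing delta = C - V instead.
 */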

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
1852
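/*
 * PMEVTYPER<n> and PMEVCNTR<n> encode the counter number n as
 * CRm<1:0>:opc2<2:0>, so the helpers below recover it with
 * ((crm & 3) << 3) | (opc2 & 7).
 */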
1853 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1854 uint64_t value)
1855 {
1856 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1857 pmevtyper_write(env, ri, value, counter);
1858 }
1859
1860 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1861 uint64_t value)
1862 {
1863 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1864 env->cp15.c14_pmevtyper[counter] = value;
1865
1866 /*
1867 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1868 * pmu_op_finish calls when loading saved state for a migration. Because
1869 * we're potentially updating the type of event here, the value written to
1870 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1871 * different counter type. Therefore, we need to set this value to the
1872 * current count for the counter type we're writing so that pmu_op_finish
1873 * has the correct count for its calculation.
1874 */
1875 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1876 if (event_supported(event)) {
1877 uint16_t event_idx = supported_event_map[event];
1878 env->cp15.c14_pmevcntr_delta[counter] =
1879 pm_events[event_idx].get_count(env);
1880 }
1881 }
1882
1883 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1884 {
1885 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1886 return pmevtyper_read(env, ri, counter);
1887 }
1888
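/*
 * PMXEVTYPER and PMXEVCNTR are indirect accessors: they operate on
 * whichever counter PMSELR.SEL currently selects, with SEL == 31
 * making PMXEVTYPER an alias of PMCCFILTR.
 */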
1889 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1890 uint64_t value)
1891 {
1892 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1893 }
1894
1895 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1896 {
1897 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1898 }
1899
1900 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1901 uint64_t value, uint8_t counter)
1902 {
1903 if (counter < pmu_num_counters(env)) {
1904 pmevcntr_op_start(env, counter);
1905 env->cp15.c14_pmevcntr[counter] = value;
1906 pmevcntr_op_finish(env, counter);
1907 }
1908 /*
1909 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1910 * are CONSTRAINED UNPREDICTABLE.
1911 */
1912 }
1913
1914 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1915 uint8_t counter)
1916 {
1917 if (counter < pmu_num_counters(env)) {
1918 uint64_t ret;
1919 pmevcntr_op_start(env, counter);
1920 ret = env->cp15.c14_pmevcntr[counter];
1921 pmevcntr_op_finish(env, counter);
1922 return ret;
1923 } else {
1924 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1925 * are CONSTRAINED UNPREDICTABLE. */
1926 return 0;
1927 }
1928 }
1929
1930 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1931 uint64_t value)
1932 {
1933 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1934 pmevcntr_write(env, ri, value, counter);
1935 }
1936
1937 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1938 {
1939 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1940 return pmevcntr_read(env, ri, counter);
1941 }
1942
1943 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1944 uint64_t value)
1945 {
1946 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1947 assert(counter < pmu_num_counters(env));
1948 env->cp15.c14_pmevcntr[counter] = value;
1949 pmevcntr_write(env, ri, value, counter);
1950 }
1951
1952 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1953 {
1954 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1955 assert(counter < pmu_num_counters(env));
1956 return env->cp15.c14_pmevcntr[counter];
1957 }
1958
1959 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1960 uint64_t value)
1961 {
1962 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1963 }
1964
1965 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1966 {
1967 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1968 }
1969
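/*
 * In v8 PMUSERENR has the EN, SW, CR and ER bits; pre-v8 only the
 * EN bit is defined, so writes are masked accordingly.
 */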
1970 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1971 uint64_t value)
1972 {
1973 if (arm_feature(env, ARM_FEATURE_V8)) {
1974 env->cp15.c9_pmuserenr = value & 0xf;
1975 } else {
1976 env->cp15.c9_pmuserenr = value & 1;
1977 }
1978 }
1979
1980 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1981 uint64_t value)
1982 {
1983     /* Only bits for implemented counters (and the cycle counter) can be set */
1984 value &= pmu_counter_mask(env);
1985 env->cp15.c9_pminten |= value;
1986 pmu_update_irq(env);
1987 }
1988
1989 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1990 uint64_t value)
1991 {
1992 value &= pmu_counter_mask(env);
1993 env->cp15.c9_pminten &= ~value;
1994 pmu_update_irq(env);
1995 }
1996
1997 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1998 uint64_t value)
1999 {
2000 /* Note that even though the AArch64 view of this register has bits
2001 * [10:0] all RES0, we can only mask the bottom 5, to comply with the
2002 * architectural requirements for bits which are RES0 only in some
2003 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
2004 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
2005 */
2006 raw_write(env, ri, value & ~0x1FULL);
2007 }
2008
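/*
 * SCR/SCR_EL3: only bits valid for the current configuration may be
 * written; valid_mask starts from the base v8.0 bit set and grows as
 * optional features (LOR, PAuth, MTE, ...) are present.
 */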
2009 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2010 {
2011 /* Begin with base v8.0 state. */
2012 uint32_t valid_mask = 0x3fff;
2013 ARMCPU *cpu = env_archcpu(env);
2014
2015 if (ri->state == ARM_CP_STATE_AA64) {
2016 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
2017 valid_mask &= ~SCR_NET;
2018
2019 if (cpu_isar_feature(aa64_lor, cpu)) {
2020 valid_mask |= SCR_TLOR;
2021 }
2022 if (cpu_isar_feature(aa64_pauth, cpu)) {
2023 valid_mask |= SCR_API | SCR_APK;
2024 }
2025 if (cpu_isar_feature(aa64_mte, cpu)) {
2026 valid_mask |= SCR_ATA;
2027 }
2028 } else {
2029 valid_mask &= ~(SCR_RW | SCR_ST);
2030 }
2031
2032 if (!arm_feature(env, ARM_FEATURE_EL2)) {
2033 valid_mask &= ~SCR_HCE;
2034
2035 /* On ARMv7, SMD (called SCD in v7) is only supported if EL2
2036 * exists; the bit is UNK/SBZP when EL2 is unavailable, so in
2037 * QEMU's ARMv7 emulation we force it to zero in that case.
2038 * On ARMv8 the bit is always available, regardless of whether
2039 * EL2 is implemented.
2040 */
2041 if (arm_feature(env, ARM_FEATURE_V7) &&
2042 !arm_feature(env, ARM_FEATURE_V8)) {
2043 valid_mask &= ~SCR_SMD;
2044 }
2045 }
2046
2047 /* Clear all-context RES0 bits. */
2048 value &= valid_mask;
2049 raw_write(env, ri, value);
2050 }
2051
2052 static CPAccessResult access_aa64_tid2(CPUARMState *env,
2053 const ARMCPRegInfo *ri,
2054 bool isread)
2055 {
2056 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
2057 return CP_ACCESS_TRAP_EL2;
2058 }
2059
2060 return CP_ACCESS_OK;
2061 }
2062
2063 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2064 {
2065 ARMCPU *cpu = env_archcpu(env);
2066
2067 /* Read CSSELR from the register bank matching the security state
2068 * of the access, and use it to index this CPU's CCSIDR values.
2069 */
2070 uint32_t index = A32_BANKED_REG_GET(env, csselr,
2071 ri->secure & ARM_CP_SECSTATE_S);
2072
2073 return cpu->ccsidr[index];
2074 }
2075
2076 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2077 uint64_t value)
2078 {
2079 raw_write(env, ri, value & 0xf);
2080 }
2081
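/*
 * ISR_EL1/ISR report the pending state of the interrupt lines. When
 * HCR_EL2.IMO/FMO route IRQ/FIQ to EL2, an EL1 read sees the virtual
 * interrupt lines instead of the physical ones.
 */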
2082 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2083 {
2084 CPUState *cs = env_cpu(env);
2085 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
2086 uint64_t ret = 0;
2087 bool allow_virt = (arm_current_el(env) == 1 &&
2088 (!arm_is_secure_below_el3(env) ||
2089 (env->cp15.scr_el3 & SCR_EEL2)));
2090
2091 if (allow_virt && (hcr_el2 & HCR_IMO)) {
2092 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2093 ret |= CPSR_I;
2094 }
2095 } else {
2096 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2097 ret |= CPSR_I;
2098 }
2099 }
2100
2101 if (allow_virt && (hcr_el2 & HCR_FMO)) {
2102 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2103 ret |= CPSR_F;
2104 }
2105 } else {
2106 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2107 ret |= CPSR_F;
2108 }
2109 }
2110
2111     /* External aborts are not possible in QEMU so the A bit is always clear */
2112 return ret;
2113 }
2114
2115 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2116 bool isread)
2117 {
2118 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2119 return CP_ACCESS_TRAP_EL2;
2120 }
2121
2122 return CP_ACCESS_OK;
2123 }
2124
2125 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2126 bool isread)
2127 {
2128 if (arm_feature(env, ARM_FEATURE_V8)) {
2129 return access_aa64_tid1(env, ri, isread);
2130 }
2131
2132 return CP_ACCESS_OK;
2133 }
2134
2135 static const ARMCPRegInfo v7_cp_reginfo[] = {
2136     /* The old v6 WFI encoding is UNPREDICTABLE in v7, but we choose to NOP it */
2137 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2138 .access = PL1_W, .type = ARM_CP_NOP },
2139     /* Performance monitors are IMPLEMENTATION DEFINED in v7,
2140 * but with an ARM recommended set of registers, which we
2141 * follow.
2142 *
2143 * Performance registers fall into three categories:
2144 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2145 * (b) RO in PL0 (i.e. UNDEF on write), RW in PL1 (PMUSERENR)
2146 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2147 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2148 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2149 */
2150 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2151 .access = PL0_RW, .type = ARM_CP_ALIAS,
2152 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2153 .writefn = pmcntenset_write,
2154 .accessfn = pmreg_access,
2155 .raw_writefn = raw_write },
2156 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2157 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2158 .access = PL0_RW, .accessfn = pmreg_access,
2159 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2160 .writefn = pmcntenset_write, .raw_writefn = raw_write },
2161 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2162 .access = PL0_RW,
2163 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2164 .accessfn = pmreg_access,
2165 .writefn = pmcntenclr_write,
2166 .type = ARM_CP_ALIAS },
2167 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2168 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2169 .access = PL0_RW, .accessfn = pmreg_access,
2170 .type = ARM_CP_ALIAS,
2171 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2172 .writefn = pmcntenclr_write },
2173 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2174 .access = PL0_RW, .type = ARM_CP_IO,
2175 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2176 .accessfn = pmreg_access,
2177 .writefn = pmovsr_write,
2178 .raw_writefn = raw_write },
2179 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2180 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2181 .access = PL0_RW, .accessfn = pmreg_access,
2182 .type = ARM_CP_ALIAS | ARM_CP_IO,
2183 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2184 .writefn = pmovsr_write,
2185 .raw_writefn = raw_write },
2186 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2187 .access = PL0_W, .accessfn = pmreg_access_swinc,
2188 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2189 .writefn = pmswinc_write },
2190 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2191 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2192 .access = PL0_W, .accessfn = pmreg_access_swinc,
2193 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2194 .writefn = pmswinc_write },
2195 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2196 .access = PL0_RW, .type = ARM_CP_ALIAS,
2197 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2198 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2199 .raw_writefn = raw_write },
2200 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2201 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2202 .access = PL0_RW, .accessfn = pmreg_access_selr,
2203 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2204 .writefn = pmselr_write, .raw_writefn = raw_write, },
2205 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2206 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2207 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2208 .accessfn = pmreg_access_ccntr },
2209 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2210 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2211 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2212 .type = ARM_CP_IO,
2213 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2214 .readfn = pmccntr_read, .writefn = pmccntr_write,
2215 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2216 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2217 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2218 .access = PL0_RW, .accessfn = pmreg_access,
2219 .type = ARM_CP_ALIAS | ARM_CP_IO,
2220 .resetvalue = 0, },
2221 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2222 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2223 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2224 .access = PL0_RW, .accessfn = pmreg_access,
2225 .type = ARM_CP_IO,
2226 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2227 .resetvalue = 0, },
2228 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2229 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2230 .accessfn = pmreg_access,
2231 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2232 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2233 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2234 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2235 .accessfn = pmreg_access,
2236 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2237 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2238 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2239 .accessfn = pmreg_access_xevcntr,
2240 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2241 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2242 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2243 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2244 .accessfn = pmreg_access_xevcntr,
2245 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2246 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2247 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2248 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2249 .resetvalue = 0,
2250 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2251 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2252 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2253 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2254 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2255 .resetvalue = 0,
2256 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2257 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2258 .access = PL1_RW, .accessfn = access_tpm,
2259 .type = ARM_CP_ALIAS | ARM_CP_IO,
2260 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2261 .resetvalue = 0,
2262 .writefn = pmintenset_write, .raw_writefn = raw_write },
2263 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2264 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2265 .access = PL1_RW, .accessfn = access_tpm,
2266 .type = ARM_CP_IO,
2267 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2268 .writefn = pmintenset_write, .raw_writefn = raw_write,
2269 .resetvalue = 0x0 },
2270 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2271 .access = PL1_RW, .accessfn = access_tpm,
2272 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2273 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2274 .writefn = pmintenclr_write, },
2275 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2276 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2277 .access = PL1_RW, .accessfn = access_tpm,
2278 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2279 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2280 .writefn = pmintenclr_write },
2281 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2282 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2283 .access = PL1_R,
2284 .accessfn = access_aa64_tid2,
2285 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2286 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2287 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2288 .access = PL1_RW,
2289 .accessfn = access_aa64_tid2,
2290 .writefn = csselr_write, .resetvalue = 0,
2291 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2292 offsetof(CPUARMState, cp15.csselr_ns) } },
2293 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2294 * we just RAZ it for all cores.
2295 */
2296 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2297 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2298 .access = PL1_R, .type = ARM_CP_CONST,
2299 .accessfn = access_aa64_tid1,
2300 .resetvalue = 0 },
2301 /* Auxiliary fault status registers: these also are IMPDEF, and we
2302 * choose to RAZ/WI for all cores.
2303 */
2304 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2305 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2306 .access = PL1_RW, .accessfn = access_tvm_trvm,
2307 .type = ARM_CP_CONST, .resetvalue = 0 },
2308 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2309 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2310 .access = PL1_RW, .accessfn = access_tvm_trvm,
2311 .type = ARM_CP_CONST, .resetvalue = 0 },
2312     /* MAIR can simply be reads-as-written because we don't implement caches
2313 * and so don't need to care about memory attributes.
2314 */
2315 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2316 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2317 .access = PL1_RW, .accessfn = access_tvm_trvm,
2318 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2319 .resetvalue = 0 },
2320 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2321 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2322 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2323 .resetvalue = 0 },
2324 /* For non-long-descriptor page tables these are PRRR and NMRR;
2325 * regardless, they still act as reads-as-written for QEMU.
2326 */
2327 /* MAIR0/1 are defined separately from their 64-bit counterpart so
2328 * that each can be assigned the correct fieldoffset for the host
2329 * endianness, as handled in the field definitions below.
2330 */
2331 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2332 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2333 .access = PL1_RW, .accessfn = access_tvm_trvm,
2334 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2335 offsetof(CPUARMState, cp15.mair0_ns) },
2336 .resetfn = arm_cp_reset_ignore },
2337 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2338 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2339 .access = PL1_RW, .accessfn = access_tvm_trvm,
2340 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2341 offsetof(CPUARMState, cp15.mair1_ns) },
2342 .resetfn = arm_cp_reset_ignore },
2343 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2344 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2345 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2346 /* 32 bit ITLB invalidates */
2347 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2348 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2349 .writefn = tlbiall_write },
2350 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2351 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2352 .writefn = tlbimva_write },
2353 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2354 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2355 .writefn = tlbiasid_write },
2356 /* 32 bit DTLB invalidates */
2357 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2358 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2359 .writefn = tlbiall_write },
2360 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2361 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2362 .writefn = tlbimva_write },
2363 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2364 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2365 .writefn = tlbiasid_write },
2366 /* 32 bit TLB invalidates */
2367 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2368 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2369 .writefn = tlbiall_write },
2370 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2371 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2372 .writefn = tlbimva_write },
2373 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2374 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2375 .writefn = tlbiasid_write },
2376 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2377 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2378 .writefn = tlbimvaa_write },
2379 REGINFO_SENTINEL
2380 };
2381
2382 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2383 /* 32 bit TLB invalidates, Inner Shareable */
2384 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2385 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2386 .writefn = tlbiall_is_write },
2387 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2388 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2389 .writefn = tlbimva_is_write },
2390 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2391 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2392 .writefn = tlbiasid_is_write },
2393 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2394 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2395 .writefn = tlbimvaa_is_write },
2396 REGINFO_SENTINEL
2397 };
2398
2399 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2400 /* PMOVSSET is not implemented in v7 before v7ve */
2401 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2402 .access = PL0_RW, .accessfn = pmreg_access,
2403 .type = ARM_CP_ALIAS | ARM_CP_IO,
2404 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2405 .writefn = pmovsset_write,
2406 .raw_writefn = raw_write },
2407 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2408 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2409 .access = PL0_RW, .accessfn = pmreg_access,
2410 .type = ARM_CP_ALIAS | ARM_CP_IO,
2411 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2412 .writefn = pmovsset_write,
2413 .raw_writefn = raw_write },
2414 REGINFO_SENTINEL
2415 };
2416
2417 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2418 uint64_t value)
2419 {
2420 value &= 1;
2421 env->teecr = value;
2422 }
2423
2424 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2425 bool isread)
2426 {
2427 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2428 return CP_ACCESS_TRAP;
2429 }
2430 return CP_ACCESS_OK;
2431 }
2432
2433 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2434 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2435 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2436 .resetvalue = 0,
2437 .writefn = teecr_write },
2438 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2439 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2440 .accessfn = teehbr_access, .resetvalue = 0 },
2441 REGINFO_SENTINEL
2442 };
2443
2444 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2445 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2446 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2447 .access = PL0_RW,
2448 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2449 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2450 .access = PL0_RW,
2451 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2452 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2453 .resetfn = arm_cp_reset_ignore },
2454 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2455 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2456 .access = PL0_R | PL1_W,
2457 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2458 .resetvalue = 0 },
2459 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2460 .access = PL0_R | PL1_W,
2461 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2462 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2463 .resetfn = arm_cp_reset_ignore },
2464 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2465 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2466 .access = PL1_RW,
2467 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2468 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2469 .access = PL1_RW,
2470 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2471 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2472 .resetvalue = 0 },
2473 REGINFO_SENTINEL
2474 };
2475
2476 #ifndef CONFIG_USER_ONLY
2477
2478 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2479 bool isread)
2480 {
2481 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2482 * Writable only at the highest implemented exception level.
2483 */
2484 int el = arm_current_el(env);
2485 uint64_t hcr;
2486 uint32_t cntkctl;
2487
2488 switch (el) {
2489 case 0:
2490 hcr = arm_hcr_el2_eff(env);
2491 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2492 cntkctl = env->cp15.cnthctl_el2;
2493 } else {
2494 cntkctl = env->cp15.c14_cntkctl;
2495 }
2496 if (!extract32(cntkctl, 0, 2)) {
2497 return CP_ACCESS_TRAP;
2498 }
2499 break;
2500 case 1:
2501 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2502 arm_is_secure_below_el3(env)) {
2503 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2504 return CP_ACCESS_TRAP_UNCATEGORIZED;
2505 }
2506 break;
2507 case 2:
2508 case 3:
2509 break;
2510 }
2511
2512 if (!isread && el < arm_highest_el(env)) {
2513 return CP_ACCESS_TRAP_UNCATEGORIZED;
2514 }
2515
2516 return CP_ACCESS_OK;
2517 }
2518
2519 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2520 bool isread)
2521 {
2522 unsigned int cur_el = arm_current_el(env);
2523 bool secure = arm_is_secure(env);
2524 uint64_t hcr = arm_hcr_el2_eff(env);
2525
2526 switch (cur_el) {
2527 case 0:
2528 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2529 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2530 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2531 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2532 }
2533
2534 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2535 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2536 return CP_ACCESS_TRAP;
2537 }
2538
2539 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2540 if (hcr & HCR_E2H) {
2541 if (timeridx == GTIMER_PHYS &&
2542 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2543 return CP_ACCESS_TRAP_EL2;
2544 }
2545 } else {
2546 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2547 if (arm_feature(env, ARM_FEATURE_EL2) &&
2548 timeridx == GTIMER_PHYS && !secure &&
2549 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2550 return CP_ACCESS_TRAP_EL2;
2551 }
2552 }
2553 break;
2554
2555 case 1:
2556 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2557 if (arm_feature(env, ARM_FEATURE_EL2) &&
2558 timeridx == GTIMER_PHYS && !secure &&
2559 (hcr & HCR_E2H
2560 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2561 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2562 return CP_ACCESS_TRAP_EL2;
2563 }
2564 break;
2565 }
2566 return CP_ACCESS_OK;
2567 }
2568
2569 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2570 bool isread)
2571 {
2572 unsigned int cur_el = arm_current_el(env);
2573 bool secure = arm_is_secure(env);
2574 uint64_t hcr = arm_hcr_el2_eff(env);
2575
2576 switch (cur_el) {
2577 case 0:
2578 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2579 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2580 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2581 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2582 }
2583
2584 /*
2585 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2586 * EL0 if EL0[PV]TEN is zero.
2587 */
2588 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2589 return CP_ACCESS_TRAP;
2590 }
2591 /* fall through */
2592
2593 case 1:
2594 if (arm_feature(env, ARM_FEATURE_EL2) &&
2595 timeridx == GTIMER_PHYS && !secure) {
2596 if (hcr & HCR_E2H) {
2597 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2598 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2599 return CP_ACCESS_TRAP_EL2;
2600 }
2601 } else {
2602 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2603 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2604 return CP_ACCESS_TRAP_EL2;
2605 }
2606 }
2607 }
2608 break;
2609 }
2610 return CP_ACCESS_OK;
2611 }
2612
2613 static CPAccessResult gt_pct_access(CPUARMState *env,
2614 const ARMCPRegInfo *ri,
2615 bool isread)
2616 {
2617 return gt_counter_access(env, GTIMER_PHYS, isread);
2618 }
2619
2620 static CPAccessResult gt_vct_access(CPUARMState *env,
2621 const ARMCPRegInfo *ri,
2622 bool isread)
2623 {
2624 return gt_counter_access(env, GTIMER_VIRT, isread);
2625 }
2626
2627 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2628 bool isread)
2629 {
2630 return gt_timer_access(env, GTIMER_PHYS, isread);
2631 }
2632
2633 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2634 bool isread)
2635 {
2636 return gt_timer_access(env, GTIMER_VIRT, isread);
2637 }
2638
2639 static CPAccessResult gt_stimer_access(CPUARMState *env,
2640 const ARMCPRegInfo *ri,
2641 bool isread)
2642 {
2643 /* The AArch64 register view of the secure physical timer is
2644 * always accessible from EL3, and configurably accessible from
2645 * Secure EL1.
2646 */
2647 switch (arm_current_el(env)) {
2648 case 1:
2649 if (!arm_is_secure(env)) {
2650 return CP_ACCESS_TRAP;
2651 }
2652 if (!(env->cp15.scr_el3 & SCR_ST)) {
2653 return CP_ACCESS_TRAP_EL3;
2654 }
2655 return CP_ACCESS_OK;
2656 case 0:
2657 case 2:
2658 return CP_ACCESS_TRAP;
2659 case 3:
2660 return CP_ACCESS_OK;
2661 default:
2662 g_assert_not_reached();
2663 }
2664 }
2665
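/*
 * The system counter value is derived from QEMU's virtual clock: one
 * counter tick every gt_cntfrq_period_ns(cpu) nanoseconds, matching
 * the CNTFRQ reset value.
 */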
2666 static uint64_t gt_get_countervalue(CPUARMState *env)
2667 {
2668 ARMCPU *cpu = env_archcpu(env);
2669
2670 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2671 }
2672
2673 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2674 {
2675 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2676
2677 if (gt->ctl & 1) {
2678         /* Timer enabled: calculate and set the current ISTATUS and IRQ
2679          * line state, and re-arm the timer for when ISTATUS next changes
2680          */
2681 uint64_t offset = timeridx == GTIMER_VIRT ?
2682 cpu->env.cp15.cntvoff_el2 : 0;
2683 uint64_t count = gt_get_countervalue(&cpu->env);
2684 /* Note that this must be unsigned 64 bit arithmetic: */
2685 int istatus = count - offset >= gt->cval;
2686 uint64_t nexttick;
2687 int irqstate;
2688
2689 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2690
2691 irqstate = (istatus && !(gt->ctl & 2));
2692 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2693
2694 if (istatus) {
2695 /* Next transition is when count rolls back over to zero */
2696 nexttick = UINT64_MAX;
2697 } else {
2698 /* Next transition is when we hit cval */
2699 nexttick = gt->cval + offset;
2700 }
2701 /* Note that the desired next expiry time might be beyond the
2702 * signed-64-bit range of a QEMUTimer -- in this case we just
2703 * set the timer for as far in the future as possible. When the
2704 * timer expires we will reset the timer for any remaining period.
2705 */
2706 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2707 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2708 } else {
2709 timer_mod(cpu->gt_timer[timeridx], nexttick);
2710 }
2711 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2712 } else {
2713 /* Timer disabled: ISTATUS and timer output always clear */
2714 gt->ctl &= ~4;
2715 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2716 timer_del(cpu->gt_timer[timeridx]);
2717 trace_arm_gt_recalc_disabled(timeridx);
2718 }
2719 }
2720
2721 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2722 int timeridx)
2723 {
2724 ARMCPU *cpu = env_archcpu(env);
2725
2726 timer_del(cpu->gt_timer[timeridx]);
2727 }
2728
2729 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2730 {
2731 return gt_get_countervalue(env);
2732 }
2733
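/*
 * CNTVOFF_EL2 applies to the virtual counter except where the E2H
 * redirection is in effect: at EL2 with HCR_EL2.E2H set, and at EL0
 * when HCR_EL2.<E2H,TGE> == '11', the virtual counter reads with a
 * zero offset.
 */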
2734 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2735 {
2736 uint64_t hcr;
2737
2738 switch (arm_current_el(env)) {
2739 case 2:
2740 hcr = arm_hcr_el2_eff(env);
2741 if (hcr & HCR_E2H) {
2742 return 0;
2743 }
2744 break;
2745 case 0:
2746 hcr = arm_hcr_el2_eff(env);
2747 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2748 return 0;
2749 }
2750 break;
2751 }
2752
2753 return env->cp15.cntvoff_el2;
2754 }
2755
2756 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2757 {
2758 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2759 }
2760
2761 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2762 int timeridx,
2763 uint64_t value)
2764 {
2765 trace_arm_gt_cval_write(timeridx, value);
2766 env->cp15.c14_timer[timeridx].cval = value;
2767 gt_recalc_timer(env_archcpu(env), timeridx);
2768 }
2769
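/*
 * TVAL is a signed 32-bit downcounter view of the 64-bit comparator:
 * reading yields CVAL - (counter - offset), and writing a value V sets
 * CVAL = (counter - offset) + sext32(V), as below.
 */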
2770 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2771 int timeridx)
2772 {
2773 uint64_t offset = 0;
2774
2775 switch (timeridx) {
2776 case GTIMER_VIRT:
2777 case GTIMER_HYPVIRT:
2778 offset = gt_virt_cnt_offset(env);
2779 break;
2780 }
2781
2782 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2783 (gt_get_countervalue(env) - offset));
2784 }
2785
2786 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2787 int timeridx,
2788 uint64_t value)
2789 {
2790 uint64_t offset = 0;
2791
2792 switch (timeridx) {
2793 case GTIMER_VIRT:
2794 case GTIMER_HYPVIRT:
2795 offset = gt_virt_cnt_offset(env);
2796 break;
2797 }
2798
2799 trace_arm_gt_tval_write(timeridx, value);
2800 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2801 sextract64(value, 0, 32);
2802 gt_recalc_timer(env_archcpu(env), timeridx);
2803 }
2804
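/*
 * CNT*_CTL: bit 0 is ENABLE, bit 1 is IMASK, bit 2 is the read-only
 * ISTATUS. Only the low two bits are deposited here; ISTATUS is
 * recomputed by gt_recalc_timer().
 */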
2805 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2806 int timeridx,
2807 uint64_t value)
2808 {
2809 ARMCPU *cpu = env_archcpu(env);
2810 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2811
2812 trace_arm_gt_ctl_write(timeridx, value);
2813 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2814 if ((oldval ^ value) & 1) {
2815 /* Enable toggled */
2816 gt_recalc_timer(cpu, timeridx);
2817 } else if ((oldval ^ value) & 2) {
2818 /* IMASK toggled: don't need to recalculate,
2819 * just set the interrupt line based on ISTATUS
2820 */
2821 int irqstate = (oldval & 4) && !(value & 2);
2822
2823 trace_arm_gt_imask_toggle(timeridx, irqstate);
2824 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2825 }
2826 }
2827
2828 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2829 {
2830 gt_timer_reset(env, ri, GTIMER_PHYS);
2831 }
2832
2833 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2834 uint64_t value)
2835 {
2836 gt_cval_write(env, ri, GTIMER_PHYS, value);
2837 }
2838
2839 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2840 {
2841 return gt_tval_read(env, ri, GTIMER_PHYS);
2842 }
2843
2844 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2845 uint64_t value)
2846 {
2847 gt_tval_write(env, ri, GTIMER_PHYS, value);
2848 }
2849
2850 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2851 uint64_t value)
2852 {
2853 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2854 }
2855
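/*
 * With VHE (HCR_EL2.E2H set, i.e. the E20 MMU indexes), accesses to
 * CNTP_* and CNTV_* are redirected to the EL2 timers CNTHP_* and
 * CNTHV_*; these helpers pick the timer index accordingly.
 */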
2856 static int gt_phys_redir_timeridx(CPUARMState *env)
2857 {
2858 switch (arm_mmu_idx(env)) {
2859 case ARMMMUIdx_E20_0:
2860 case ARMMMUIdx_E20_2:
2861 case ARMMMUIdx_E20_2_PAN:
2862 return GTIMER_HYP;
2863 default:
2864 return GTIMER_PHYS;
2865 }
2866 }
2867
2868 static int gt_virt_redir_timeridx(CPUARMState *env)
2869 {
2870 switch (arm_mmu_idx(env)) {
2871 case ARMMMUIdx_E20_0:
2872 case ARMMMUIdx_E20_2:
2873 case ARMMMUIdx_E20_2_PAN:
2874 return GTIMER_HYPVIRT;
2875 default:
2876 return GTIMER_VIRT;
2877 }
2878 }
2879
2880 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2881 const ARMCPRegInfo *ri)
2882 {
2883 int timeridx = gt_phys_redir_timeridx(env);
2884 return env->cp15.c14_timer[timeridx].cval;
2885 }
2886
2887 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2888 uint64_t value)
2889 {
2890 int timeridx = gt_phys_redir_timeridx(env);
2891 gt_cval_write(env, ri, timeridx, value);
2892 }
2893
2894 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2895 const ARMCPRegInfo *ri)
2896 {
2897 int timeridx = gt_phys_redir_timeridx(env);
2898 return gt_tval_read(env, ri, timeridx);
2899 }
2900
2901 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2902 uint64_t value)
2903 {
2904 int timeridx = gt_phys_redir_timeridx(env);
2905 gt_tval_write(env, ri, timeridx, value);
2906 }
2907
2908 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2909 const ARMCPRegInfo *ri)
2910 {
2911 int timeridx = gt_phys_redir_timeridx(env);
2912 return env->cp15.c14_timer[timeridx].ctl;
2913 }
2914
2915 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2916 uint64_t value)
2917 {
2918 int timeridx = gt_phys_redir_timeridx(env);
2919 gt_ctl_write(env, ri, timeridx, value);
2920 }
2921
2922 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2923 {
2924 gt_timer_reset(env, ri, GTIMER_VIRT);
2925 }
2926
2927 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2928 uint64_t value)
2929 {
2930 gt_cval_write(env, ri, GTIMER_VIRT, value);
2931 }
2932
2933 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2934 {
2935 return gt_tval_read(env, ri, GTIMER_VIRT);
2936 }
2937
2938 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2939 uint64_t value)
2940 {
2941 gt_tval_write(env, ri, GTIMER_VIRT, value);
2942 }
2943
2944 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2945 uint64_t value)
2946 {
2947 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2948 }
2949
2950 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2951 uint64_t value)
2952 {
2953 ARMCPU *cpu = env_archcpu(env);
2954
2955 trace_arm_gt_cntvoff_write(value);
2956 raw_write(env, ri, value);
2957 gt_recalc_timer(cpu, GTIMER_VIRT);
2958 }
2959
2960 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2961 const ARMCPRegInfo *ri)
2962 {
2963 int timeridx = gt_virt_redir_timeridx(env);
2964 return env->cp15.c14_timer[timeridx].cval;
2965 }
2966
2967 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2968 uint64_t value)
2969 {
2970 int timeridx = gt_virt_redir_timeridx(env);
2971 gt_cval_write(env, ri, timeridx, value);
2972 }
2973
2974 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2975 const ARMCPRegInfo *ri)
2976 {
2977 int timeridx = gt_virt_redir_timeridx(env);
2978 return gt_tval_read(env, ri, timeridx);
2979 }
2980
2981 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2982 uint64_t value)
2983 {
2984 int timeridx = gt_virt_redir_timeridx(env);
2985 gt_tval_write(env, ri, timeridx, value);
2986 }
2987
2988 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2989 const ARMCPRegInfo *ri)
2990 {
2991 int timeridx = gt_virt_redir_timeridx(env);
2992 return env->cp15.c14_timer[timeridx].ctl;
2993 }
2994
2995 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2996 uint64_t value)
2997 {
2998 int timeridx = gt_virt_redir_timeridx(env);
2999 gt_ctl_write(env, ri, timeridx, value);
3000 }
3001
3002 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3003 {
3004 gt_timer_reset(env, ri, GTIMER_HYP);
3005 }
3006
3007 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3008 uint64_t value)
3009 {
3010 gt_cval_write(env, ri, GTIMER_HYP, value);
3011 }
3012
3013 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3014 {
3015 return gt_tval_read(env, ri, GTIMER_HYP);
3016 }
3017
3018 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3019 uint64_t value)
3020 {
3021 gt_tval_write(env, ri, GTIMER_HYP, value);
3022 }
3023
3024 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3025 uint64_t value)
3026 {
3027 gt_ctl_write(env, ri, GTIMER_HYP, value);
3028 }
3029
3030 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3031 {
3032 gt_timer_reset(env, ri, GTIMER_SEC);
3033 }
3034
3035 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3036 uint64_t value)
3037 {
3038 gt_cval_write(env, ri, GTIMER_SEC, value);
3039 }
3040
3041 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3042 {
3043 return gt_tval_read(env, ri, GTIMER_SEC);
3044 }
3045
3046 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3047 uint64_t value)
3048 {
3049 gt_tval_write(env, ri, GTIMER_SEC, value);
3050 }
3051
3052 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3053 uint64_t value)
3054 {
3055 gt_ctl_write(env, ri, GTIMER_SEC, value);
3056 }
3057
3058 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3059 {
3060 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3061 }
3062
3063 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3064 uint64_t value)
3065 {
3066 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3067 }
3068
3069 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3070 {
3071 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3072 }
3073
3074 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3075 uint64_t value)
3076 {
3077 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3078 }
3079
3080 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3081 uint64_t value)
3082 {
3083 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3084 }
3085
3086 void arm_gt_ptimer_cb(void *opaque)
3087 {
3088 ARMCPU *cpu = opaque;
3089
3090 gt_recalc_timer(cpu, GTIMER_PHYS);
3091 }
3092
3093 void arm_gt_vtimer_cb(void *opaque)
3094 {
3095 ARMCPU *cpu = opaque;
3096
3097 gt_recalc_timer(cpu, GTIMER_VIRT);
3098 }
3099
3100 void arm_gt_htimer_cb(void *opaque)
3101 {
3102 ARMCPU *cpu = opaque;
3103
3104 gt_recalc_timer(cpu, GTIMER_HYP);
3105 }
3106
3107 void arm_gt_stimer_cb(void *opaque)
3108 {
3109 ARMCPU *cpu = opaque;
3110
3111 gt_recalc_timer(cpu, GTIMER_SEC);
3112 }
3113
3114 void arm_gt_hvtimer_cb(void *opaque)
3115 {
3116 ARMCPU *cpu = opaque;
3117
3118 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3119 }
3120
3121 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3122 {
3123 ARMCPU *cpu = env_archcpu(env);
3124
3125 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3126 }
3127
3128 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3129 /* Note that CNTFRQ is purely reads-as-written for the benefit
3130 * of software; writing it doesn't actually change the timer frequency.
3131 * Our reset value matches the fixed frequency we implement the timer at.
3132 */
3133 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3134 .type = ARM_CP_ALIAS,
3135 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3136 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3137 },
3138 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3139 .opc0 = 3, .opc1 = 3