gdbstub: extend GByteArray to read register helpers
[qemu.git] / target / arm / helper.c
1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "target/arm/idau.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/gdbstub.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/host-utils.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "hw/semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/tcg.h"
29 #include "qemu/range.h"
30 #include "qapi/qapi-commands-machine-target.h"
31 #include "qapi/error.h"
32 #include "qemu/guest-random.h"
33 #ifdef CONFIG_TCG
34 #include "arm_ldst.h"
35 #include "exec/cpu_ldst.h"
36 #endif
37
38 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
39
40 #ifndef CONFIG_USER_ONLY
41
42 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
43 MMUAccessType access_type, ARMMMUIdx mmu_idx,
44 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
45 target_ulong *page_size_ptr,
46 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
47 #endif
48
49 static void switch_mode(CPUARMState *env, int mode);
50
51 static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
52 {
53 ARMCPU *cpu = env_archcpu(env);
54 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
55
56 /* VFP data registers are always little-endian. */
57 if (reg < nregs) {
58 return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
59 }
60 if (arm_feature(env, ARM_FEATURE_NEON)) {
61 /* Aliases for Q regs. */
62 nregs += 16;
63 if (reg < nregs) {
64 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
65 return gdb_get_reg128(buf, q[0], q[1]);
66 }
67 }
68 switch (reg - nregs) {
69     case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
70     case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env));
71     case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
72 }
73 return 0;
74 }
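
/*
 * The gdb_get_reg* helpers from "exec/gdbstub.h" append the register value
 * to the GByteArray and return the number of bytes added, so the value
 * returned by vfp_gdb_get_reg() above is the size of the register just
 * emitted. A rough sketch of one of them (assuming the GByteArray-based
 * helpers introduced by this series):
 *
 *   static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
 *   {
 *       uint64_t to_quad = tswap64(val);
 *       g_byte_array_append(buf, (uint8_t *)&to_quad, 8);
 *       return 8;
 *   }
 */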
75
76 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
77 {
78 ARMCPU *cpu = env_archcpu(env);
79 int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
80
81 if (reg < nregs) {
82 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
83 return 8;
84 }
85 if (arm_feature(env, ARM_FEATURE_NEON)) {
86 nregs += 16;
87 if (reg < nregs) {
88 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
89 q[0] = ldq_le_p(buf);
90 q[1] = ldq_le_p(buf + 8);
91 return 16;
92 }
93 }
94 switch (reg - nregs) {
95 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
96 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
97 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
98 }
99 return 0;
100 }
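
/*
 * Note the asymmetry with vfp_gdb_get_reg(): the read helpers append into
 * the GByteArray and return the number of bytes produced, while the set
 * helpers consume raw bytes supplied by the gdb stub (little-endian for the
 * VFP data registers) and return the number of bytes consumed.
 */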
101
102 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
103 {
104 switch (reg) {
105 case 0 ... 31:
106 {
107 /* 128 bit FP register - quads are in LE order */
108 uint64_t *q = aa64_vfp_qreg(env, reg);
109 return gdb_get_reg128(buf, q[1], q[0]);
110 }
111 case 32:
112 /* FPSR */
113 return gdb_get_reg32(buf, vfp_get_fpsr(env));
114 case 33:
115 /* FPCR */
116 return gdb_get_reg32(buf, vfp_get_fpcr(env));
117 default:
118 return 0;
119 }
120 }
121
122 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
123 {
124 switch (reg) {
125 case 0 ... 31:
126 /* 128 bit FP register */
127 {
128 uint64_t *q = aa64_vfp_qreg(env, reg);
129 q[0] = ldq_le_p(buf);
130 q[1] = ldq_le_p(buf + 8);
131 return 16;
132 }
133 case 32:
134 /* FPSR */
135 vfp_set_fpsr(env, ldl_p(buf));
136 return 4;
137 case 33:
138 /* FPCR */
139 vfp_set_fpcr(env, ldl_p(buf));
140 return 4;
141 default:
142 return 0;
143 }
144 }
145
146 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
147 {
148 assert(ri->fieldoffset);
149 if (cpreg_field_is_64bit(ri)) {
150 return CPREG_FIELD64(env, ri);
151 } else {
152 return CPREG_FIELD32(env, ri);
153 }
154 }
155
156 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
157 uint64_t value)
158 {
159 assert(ri->fieldoffset);
160 if (cpreg_field_is_64bit(ri)) {
161 CPREG_FIELD64(env, ri) = value;
162 } else {
163 CPREG_FIELD32(env, ri) = value;
164 }
165 }
166
167 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
168 {
169 return (char *)env + ri->fieldoffset;
170 }
171
172 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
173 {
174 /* Raw read of a coprocessor register (as needed for migration, etc). */
175 if (ri->type & ARM_CP_CONST) {
176 return ri->resetvalue;
177 } else if (ri->raw_readfn) {
178 return ri->raw_readfn(env, ri);
179 } else if (ri->readfn) {
180 return ri->readfn(env, ri);
181 } else {
182 return raw_read(env, ri);
183 }
184 }
185
186 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
187 uint64_t v)
188 {
189 /* Raw write of a coprocessor register (as needed for migration, etc).
190 * Note that constant registers are treated as write-ignored; the
191 * caller should check for success by whether a readback gives the
192 * value written.
193 */
194 if (ri->type & ARM_CP_CONST) {
195 return;
196 } else if (ri->raw_writefn) {
197 ri->raw_writefn(env, ri, v);
198 } else if (ri->writefn) {
199 ri->writefn(env, ri, v);
200 } else {
201 raw_write(env, ri, v);
202 }
203 }
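
/*
 * A caller that needs to know whether a raw write actually took effect
 * does a readback, e.g.:
 *
 *   write_raw_cp_reg(env, ri, v);
 *   if (read_raw_cp_reg(env, ri) != v) {
 *       ...the register is constant/read-only or clamped the value...
 *   }
 *
 * which is exactly the pattern used by write_list_to_cpustate() below.
 */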
204
205 static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
206 {
207 ARMCPU *cpu = env_archcpu(env);
208 const ARMCPRegInfo *ri;
209 uint32_t key;
210
211 key = cpu->dyn_xml.cpregs_keys[reg];
212 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
213 if (ri) {
214 if (cpreg_field_is_64bit(ri)) {
215 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
216 } else {
217 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
218 }
219 }
220 return 0;
221 }
222
223 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
224 {
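    /* Writes to system registers from the gdb stub are not supported: report 0 bytes consumed. */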
225 return 0;
226 }
227
228 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
229 {
230 /* Return true if the regdef would cause an assertion if you called
231 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
232 * program bug for it not to have the NO_RAW flag).
233 * NB that returning false here doesn't necessarily mean that calling
234 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
235 * read/write access functions which are safe for raw use" from "has
236 * read/write access functions which have side effects but has forgotten
237 * to provide raw access functions".
238 * The tests here line up with the conditions in read/write_raw_cp_reg()
239 * and assertions in raw_read()/raw_write().
240 */
241 if ((ri->type & ARM_CP_CONST) ||
242 ri->fieldoffset ||
243 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
244 return false;
245 }
246 return true;
247 }
248
249 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
250 {
251 /* Write the coprocessor state from cpu->env to the (index,value) list. */
252 int i;
253 bool ok = true;
254
255 for (i = 0; i < cpu->cpreg_array_len; i++) {
256 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
257 const ARMCPRegInfo *ri;
258 uint64_t newval;
259
260 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
261 if (!ri) {
262 ok = false;
263 continue;
264 }
265 if (ri->type & ARM_CP_NO_RAW) {
266 continue;
267 }
268
269 newval = read_raw_cp_reg(&cpu->env, ri);
270 if (kvm_sync) {
271 /*
272 * Only sync if the previous list->cpustate sync succeeded.
273 * Rather than tracking the success/failure state for every
274 * item in the list, we just recheck "does the raw write we must
275 * have made in write_list_to_cpustate() read back OK" here.
276 */
277 uint64_t oldval = cpu->cpreg_values[i];
278
279 if (oldval == newval) {
280 continue;
281 }
282
283 write_raw_cp_reg(&cpu->env, ri, oldval);
284 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
285 continue;
286 }
287
288 write_raw_cp_reg(&cpu->env, ri, newval);
289 }
290 cpu->cpreg_values[i] = newval;
291 }
292 return ok;
293 }
294
295 bool write_list_to_cpustate(ARMCPU *cpu)
296 {
297 int i;
298 bool ok = true;
299
300 for (i = 0; i < cpu->cpreg_array_len; i++) {
301 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
302 uint64_t v = cpu->cpreg_values[i];
303 const ARMCPRegInfo *ri;
304
305 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
306 if (!ri) {
307 ok = false;
308 continue;
309 }
310 if (ri->type & ARM_CP_NO_RAW) {
311 continue;
312 }
313 /* Write value and confirm it reads back as written
314 * (to catch read-only registers and partially read-only
315 * registers where the incoming migration value doesn't match)
316 */
317 write_raw_cp_reg(&cpu->env, ri, v);
318 if (read_raw_cp_reg(&cpu->env, ri) != v) {
319 ok = false;
320 }
321 }
322 return ok;
323 }
324
325 static void add_cpreg_to_list(gpointer key, gpointer opaque)
326 {
327 ARMCPU *cpu = opaque;
328 uint64_t regidx;
329 const ARMCPRegInfo *ri;
330
331 regidx = *(uint32_t *)key;
332 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
333
334 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
335 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
336 /* The value array need not be initialized at this point */
337 cpu->cpreg_array_len++;
338 }
339 }
340
341 static void count_cpreg(gpointer key, gpointer opaque)
342 {
343 ARMCPU *cpu = opaque;
344 uint64_t regidx;
345 const ARMCPRegInfo *ri;
346
347 regidx = *(uint32_t *)key;
348 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
349
350 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
351 cpu->cpreg_array_len++;
352 }
353 }
354
355 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
356 {
357 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
358 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
359
360 if (aidx > bidx) {
361 return 1;
362 }
363 if (aidx < bidx) {
364 return -1;
365 }
366 return 0;
367 }
368
369 void init_cpreg_list(ARMCPU *cpu)
370 {
371 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
372 * Note that we require cpreg_tuples[] to be sorted by key ID.
373 */
374 GList *keys;
375 int arraylen;
376
377 keys = g_hash_table_get_keys(cpu->cp_regs);
378 keys = g_list_sort(keys, cpreg_key_compare);
379
380 cpu->cpreg_array_len = 0;
381
382 g_list_foreach(keys, count_cpreg, cpu);
383
384 arraylen = cpu->cpreg_array_len;
385 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
386 cpu->cpreg_values = g_new(uint64_t, arraylen);
387 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
388 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
389 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
390 cpu->cpreg_array_len = 0;
391
392 g_list_foreach(keys, add_cpreg_to_list, cpu);
393
394 assert(cpu->cpreg_array_len == arraylen);
395
396 g_list_free(keys);
397 }
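
/*
 * init_cpreg_list() uses a simple count-then-fill pattern: the first
 * g_list_foreach() pass (count_cpreg) only sizes the arrays, the second
 * pass (add_cpreg_to_list) fills cpreg_indexes[] in the same sorted key
 * order, and the final assert checks that the two passes agreed on the
 * number of raw-accessible registers.
 */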
398
399 /*
400 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
401 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
402 *
403 * access_el3_aa32ns: Used to check AArch32 register views.
404 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
405 */
406 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
407 const ARMCPRegInfo *ri,
408 bool isread)
409 {
410 bool secure = arm_is_secure_below_el3(env);
411
412 assert(!arm_el_is_aa64(env, 3));
413 if (secure) {
414 return CP_ACCESS_TRAP_UNCATEGORIZED;
415 }
416 return CP_ACCESS_OK;
417 }
418
419 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
420 const ARMCPRegInfo *ri,
421 bool isread)
422 {
423 if (!arm_el_is_aa64(env, 3)) {
424 return access_el3_aa32ns(env, ri, isread);
425 }
426 return CP_ACCESS_OK;
427 }
428
429 /* Some secure-only AArch32 registers trap to EL3 if used from
430 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
431 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
432 * We assume that the .access field is set to PL1_RW.
433 */
434 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
435 const ARMCPRegInfo *ri,
436 bool isread)
437 {
438 if (arm_current_el(env) == 3) {
439 return CP_ACCESS_OK;
440 }
441 if (arm_is_secure_below_el3(env)) {
442 return CP_ACCESS_TRAP_EL3;
443 }
444 /* This will be EL1 NS and EL2 NS, which just UNDEF */
445 return CP_ACCESS_TRAP_UNCATEGORIZED;
446 }
447
448 /* Check for traps to "powerdown debug" registers, which are controlled
449  * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
450 */
451 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
452 bool isread)
453 {
454 int el = arm_current_el(env);
455 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
456 (env->cp15.mdcr_el2 & MDCR_TDE) ||
457 (arm_hcr_el2_eff(env) & HCR_TGE);
458
459 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
460 return CP_ACCESS_TRAP_EL2;
461 }
462 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
463 return CP_ACCESS_TRAP_EL3;
464 }
465 return CP_ACCESS_OK;
466 }
467
468 /* Check for traps to "debug ROM" registers, which are controlled
469 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
470 */
471 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
472 bool isread)
473 {
474 int el = arm_current_el(env);
475 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
476 (env->cp15.mdcr_el2 & MDCR_TDE) ||
477 (arm_hcr_el2_eff(env) & HCR_TGE);
478
479 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
480 return CP_ACCESS_TRAP_EL2;
481 }
482 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
483 return CP_ACCESS_TRAP_EL3;
484 }
485 return CP_ACCESS_OK;
486 }
487
488 /* Check for traps to general debug registers, which are controlled
489 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
490 */
491 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
492 bool isread)
493 {
494 int el = arm_current_el(env);
495 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
496 (env->cp15.mdcr_el2 & MDCR_TDE) ||
497 (arm_hcr_el2_eff(env) & HCR_TGE);
498
499 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
500 return CP_ACCESS_TRAP_EL2;
501 }
502 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
503 return CP_ACCESS_TRAP_EL3;
504 }
505 return CP_ACCESS_OK;
506 }
507
508 /* Check for traps to performance monitor registers, which are controlled
509 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
510 */
511 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
512 bool isread)
513 {
514 int el = arm_current_el(env);
515
516 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
517 && !arm_is_secure_below_el3(env)) {
518 return CP_ACCESS_TRAP_EL2;
519 }
520 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
521 return CP_ACCESS_TRAP_EL3;
522 }
523 return CP_ACCESS_OK;
524 }
525
526 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
527 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
528 bool isread)
529 {
530 if (arm_current_el(env) == 1) {
531 uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
532 if (arm_hcr_el2_eff(env) & trap) {
533 return CP_ACCESS_TRAP_EL2;
534 }
535 }
536 return CP_ACCESS_OK;
537 }
538
539 /* Check for traps from EL1 due to HCR_EL2.TSW. */
540 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
541 bool isread)
542 {
543 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
544 return CP_ACCESS_TRAP_EL2;
545 }
546 return CP_ACCESS_OK;
547 }
548
549 /* Check for traps from EL1 due to HCR_EL2.TACR. */
550 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
551 bool isread)
552 {
553 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
554 return CP_ACCESS_TRAP_EL2;
555 }
556 return CP_ACCESS_OK;
557 }
558
559 /* Check for traps from EL1 due to HCR_EL2.TTLB. */
560 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
561 bool isread)
562 {
563 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
564 return CP_ACCESS_TRAP_EL2;
565 }
566 return CP_ACCESS_OK;
567 }
568
569 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
570 {
571 ARMCPU *cpu = env_archcpu(env);
572
573 raw_write(env, ri, value);
574 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
575 }
576
577 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
578 {
579 ARMCPU *cpu = env_archcpu(env);
580
581 if (raw_read(env, ri) != value) {
582 /* Unlike real hardware the qemu TLB uses virtual addresses,
583 * not modified virtual addresses, so this causes a TLB flush.
584 */
585 tlb_flush(CPU(cpu));
586 raw_write(env, ri, value);
587 }
588 }
589
590 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
591 uint64_t value)
592 {
593 ARMCPU *cpu = env_archcpu(env);
594
595 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
596 && !extended_addresses_enabled(env)) {
597 /* For VMSA (when not using the LPAE long descriptor page table
598 * format) this register includes the ASID, so do a TLB flush.
599 * For PMSA it is purely a process ID and no action is needed.
600 */
601 tlb_flush(CPU(cpu));
602 }
603 raw_write(env, ri, value);
604 }
605
606 /* IS variants of TLB operations must affect all cores */
607 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
608 uint64_t value)
609 {
610 CPUState *cs = env_cpu(env);
611
612 tlb_flush_all_cpus_synced(cs);
613 }
614
615 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
616 uint64_t value)
617 {
618 CPUState *cs = env_cpu(env);
619
620 tlb_flush_all_cpus_synced(cs);
621 }
622
623 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
624 uint64_t value)
625 {
626 CPUState *cs = env_cpu(env);
627
628 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
629 }
630
631 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
632 uint64_t value)
633 {
634 CPUState *cs = env_cpu(env);
635
636 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
637 }
638
639 /*
640 * Non-IS variants of TLB operations are upgraded to
641 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
642 * force broadcast of these operations.
643 */
644 static bool tlb_force_broadcast(CPUARMState *env)
645 {
646 return (env->cp15.hcr_el2 & HCR_FB) &&
647 arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
648 }
649
650 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
651 uint64_t value)
652 {
653 /* Invalidate all (TLBIALL) */
654 CPUState *cs = env_cpu(env);
655
656 if (tlb_force_broadcast(env)) {
657 tlb_flush_all_cpus_synced(cs);
658 } else {
659 tlb_flush(cs);
660 }
661 }
662
663 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
664 uint64_t value)
665 {
666 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
667 CPUState *cs = env_cpu(env);
668
669 value &= TARGET_PAGE_MASK;
670 if (tlb_force_broadcast(env)) {
671 tlb_flush_page_all_cpus_synced(cs, value);
672 } else {
673 tlb_flush_page(cs, value);
674 }
675 }
676
677 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
678 uint64_t value)
679 {
680 /* Invalidate by ASID (TLBIASID) */
681 CPUState *cs = env_cpu(env);
682
683 if (tlb_force_broadcast(env)) {
684 tlb_flush_all_cpus_synced(cs);
685 } else {
686 tlb_flush(cs);
687 }
688 }
689
690 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
691 uint64_t value)
692 {
693 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
694 CPUState *cs = env_cpu(env);
695
696 value &= TARGET_PAGE_MASK;
697 if (tlb_force_broadcast(env)) {
698 tlb_flush_page_all_cpus_synced(cs, value);
699 } else {
700 tlb_flush_page(cs, value);
701 }
702 }
703
704 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
705 uint64_t value)
706 {
707 CPUState *cs = env_cpu(env);
708
709 tlb_flush_by_mmuidx(cs,
710 ARMMMUIdxBit_E10_1 |
711 ARMMMUIdxBit_E10_1_PAN |
712 ARMMMUIdxBit_E10_0 |
713 ARMMMUIdxBit_Stage2);
714 }
715
716 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
717 uint64_t value)
718 {
719 CPUState *cs = env_cpu(env);
720
721 tlb_flush_by_mmuidx_all_cpus_synced(cs,
722 ARMMMUIdxBit_E10_1 |
723 ARMMMUIdxBit_E10_1_PAN |
724 ARMMMUIdxBit_E10_0 |
725 ARMMMUIdxBit_Stage2);
726 }
727
728 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
729 uint64_t value)
730 {
731 /* Invalidate by IPA. This has to invalidate any structures that
732 * contain only stage 2 translation information, but does not need
733 * to apply to structures that contain combined stage 1 and stage 2
734 * translation information.
735 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
736 */
737 CPUState *cs = env_cpu(env);
738 uint64_t pageaddr;
739
740 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
741 return;
742 }
743
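    /* TLBIIPAS2* pass IPA[39:12] in bits [27:0] of the register value;
     * shifting left by 12 and extracting 40 bits rebuilds the byte address
     * of the page being invalidated.
     */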
744 pageaddr = sextract64(value << 12, 0, 40);
745
746 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
747 }
748
749 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
750 uint64_t value)
751 {
752 CPUState *cs = env_cpu(env);
753 uint64_t pageaddr;
754
755 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
756 return;
757 }
758
759 pageaddr = sextract64(value << 12, 0, 40);
760
761 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
762 ARMMMUIdxBit_Stage2);
763 }
764
765 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
766 uint64_t value)
767 {
768 CPUState *cs = env_cpu(env);
769
770 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
771 }
772
773 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
774 uint64_t value)
775 {
776 CPUState *cs = env_cpu(env);
777
778 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
779 }
780
781 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
782 uint64_t value)
783 {
784 CPUState *cs = env_cpu(env);
785 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
786
787 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
788 }
789
790 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
791 uint64_t value)
792 {
793 CPUState *cs = env_cpu(env);
794 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
795
796 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
797 ARMMMUIdxBit_E2);
798 }
799
800 static const ARMCPRegInfo cp_reginfo[] = {
801 /* Define the secure and non-secure FCSE identifier CP registers
802 * separately because there is no secure bank in V8 (no _EL3). This allows
803 * the secure register to be properly reset and migrated. There is also no
804 * v8 EL1 version of the register so the non-secure instance stands alone.
805 */
806 { .name = "FCSEIDR",
807 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
808 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
809 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
810 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
811 { .name = "FCSEIDR_S",
812 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
813 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
814 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
815 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
816 /* Define the secure and non-secure context identifier CP registers
817 * separately because there is no secure bank in V8 (no _EL3). This allows
818 * the secure register to be properly reset and migrated. In the
819 * non-secure case, the 32-bit register will have reset and migration
820 * disabled during registration as it is handled by the 64-bit instance.
821 */
822 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
823 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
824 .access = PL1_RW, .accessfn = access_tvm_trvm,
825 .secure = ARM_CP_SECSTATE_NS,
826 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
827 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
828 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
829 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
830 .access = PL1_RW, .accessfn = access_tvm_trvm,
831 .secure = ARM_CP_SECSTATE_S,
832 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
833 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
834 REGINFO_SENTINEL
835 };
836
837 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
838 /* NB: Some of these registers exist in v8 but with more precise
839 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
840 */
841 /* MMU Domain access control / MPU write buffer control */
842 { .name = "DACR",
843 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
844 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
845 .writefn = dacr_write, .raw_writefn = raw_write,
846 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
847 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
848 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
849 * For v6 and v5, these mappings are overly broad.
850 */
851 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
852 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
853 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
854 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
855 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
856 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
857 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
858 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
859 /* Cache maintenance ops; some of this space may be overridden later. */
860 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
861 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
862 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
863 REGINFO_SENTINEL
864 };
865
866 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
867 /* Not all pre-v6 cores implemented this WFI, so this is slightly
868 * over-broad.
869 */
870 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
871 .access = PL1_W, .type = ARM_CP_WFI },
872 REGINFO_SENTINEL
873 };
874
875 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
876 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
877 * is UNPREDICTABLE; we choose to NOP as most implementations do).
878 */
879 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
880 .access = PL1_W, .type = ARM_CP_WFI },
881 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
882 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
883 * OMAPCP will override this space.
884 */
885 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
886 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
887 .resetvalue = 0 },
888 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
889 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
890 .resetvalue = 0 },
891 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
892 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
893 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
894 .resetvalue = 0 },
895 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
896 * implementing it as RAZ means the "debug architecture version" bits
897 * will read as a reserved value, which should cause Linux to not try
898 * to use the debug hardware.
899 */
900 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
901 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
902 /* MMU TLB control. Note that the wildcarding means we cover not just
903 * the unified TLB ops but also the dside/iside/inner-shareable variants.
904 */
905 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
906 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
907 .type = ARM_CP_NO_RAW },
908 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
909 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
910 .type = ARM_CP_NO_RAW },
911 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
912 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
913 .type = ARM_CP_NO_RAW },
914 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
915 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
916 .type = ARM_CP_NO_RAW },
917 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
918 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
919 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
920 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
921 REGINFO_SENTINEL
922 };
923
924 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
925 uint64_t value)
926 {
927 uint32_t mask = 0;
928
929 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
930 if (!arm_feature(env, ARM_FEATURE_V8)) {
931 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
932 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
933 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
934 */
935 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
936 /* VFP coprocessor: cp10 & cp11 [23:20] */
937 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
938
939 if (!arm_feature(env, ARM_FEATURE_NEON)) {
940 /* ASEDIS [31] bit is RAO/WI */
941 value |= (1 << 31);
942 }
943
944 /* VFPv3 and upwards with NEON implement 32 double precision
945 * registers (D0-D31).
946 */
947 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
948 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
949 value |= (1 << 30);
950 }
951 }
952 value &= mask;
953 }
954
955 /*
956 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
957 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
958 */
959 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
960 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
961 value &= ~(0xf << 20);
962 value |= env->cp15.cpacr_el1 & (0xf << 20);
963 }
964
965 env->cp15.cpacr_el1 = value;
966 }
967
968 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
969 {
970 /*
971 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
972 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
973 */
974 uint64_t value = env->cp15.cpacr_el1;
975
976 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
977 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
978 value &= ~(0xf << 20);
979 }
980 return value;
981 }
982
983
984 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
985 {
986 /* Call cpacr_write() so that we reset with the correct RAO bits set
987 * for our CPU features.
988 */
989 cpacr_write(env, ri, 0);
990 }
991
992 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
993 bool isread)
994 {
995 if (arm_feature(env, ARM_FEATURE_V8)) {
996 /* Check if CPACR accesses are to be trapped to EL2 */
997 if (arm_current_el(env) == 1 &&
998 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
999 return CP_ACCESS_TRAP_EL2;
1000 /* Check if CPACR accesses are to be trapped to EL3 */
1001 } else if (arm_current_el(env) < 3 &&
1002 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1003 return CP_ACCESS_TRAP_EL3;
1004 }
1005 }
1006
1007 return CP_ACCESS_OK;
1008 }
1009
1010 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1011 bool isread)
1012 {
1013 /* Check if CPTR accesses are set to trap to EL3 */
1014 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1015 return CP_ACCESS_TRAP_EL3;
1016 }
1017
1018 return CP_ACCESS_OK;
1019 }
1020
1021 static const ARMCPRegInfo v6_cp_reginfo[] = {
1022 /* prefetch by MVA in v6, NOP in v7 */
1023 { .name = "MVA_prefetch",
1024 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
1025 .access = PL1_W, .type = ARM_CP_NOP },
1026 /* We need to break the TB after ISB to execute self-modifying code
1027 * correctly and also to take any pending interrupts immediately.
1028  * So we use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
1029 */
1030 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
1031 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
1032 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
1033 .access = PL0_W, .type = ARM_CP_NOP },
1034 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
1035 .access = PL0_W, .type = ARM_CP_NOP },
1036 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
1037 .access = PL1_RW, .accessfn = access_tvm_trvm,
1038 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1039 offsetof(CPUARMState, cp15.ifar_ns) },
1040 .resetvalue = 0, },
1041 /* Watchpoint Fault Address Register : should actually only be present
1042 * for 1136, 1176, 11MPCore.
1043 */
1044 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1045 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1046 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1047 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1048 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1049 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1050 REGINFO_SENTINEL
1051 };
1052
1053 /* Definitions for the PMU registers */
1054 #define PMCRN_MASK 0xf800
1055 #define PMCRN_SHIFT 11
1056 #define PMCRLC 0x40
1057 #define PMCRDP 0x20
1058 #define PMCRX 0x10
1059 #define PMCRD 0x8
1060 #define PMCRC 0x4
1061 #define PMCRP 0x2
1062 #define PMCRE 0x1
1063 /*
1064 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
1065 * which can be written as 1 to trigger behaviour but which stay RAZ).
1066 */
1067 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1068
1069 #define PMXEVTYPER_P 0x80000000
1070 #define PMXEVTYPER_U 0x40000000
1071 #define PMXEVTYPER_NSK 0x20000000
1072 #define PMXEVTYPER_NSU 0x10000000
1073 #define PMXEVTYPER_NSH 0x08000000
1074 #define PMXEVTYPER_M 0x04000000
1075 #define PMXEVTYPER_MT 0x02000000
1076 #define PMXEVTYPER_EVTCOUNT 0x0000ffff
1077 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1078 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1079 PMXEVTYPER_M | PMXEVTYPER_MT | \
1080 PMXEVTYPER_EVTCOUNT)
1081
1082 #define PMCCFILTR 0xf8000000
1083 #define PMCCFILTR_M PMXEVTYPER_M
1084 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1085
1086 static inline uint32_t pmu_num_counters(CPUARMState *env)
1087 {
1088 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1089 }
1090
1091 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1092 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1093 {
1094 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1095 }
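
/*
 * Worked example: with PMCR.N == 4 event counters, pmu_counter_mask()
 * returns (1 << 31) | 0xf: bit 31 for the cycle counter PMCCNTR and
 * bits [3:0] for event counters 0..3.
 */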
1096
1097 typedef struct pm_event {
1098 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1099 /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1100 bool (*supported)(CPUARMState *);
1101 /*
1102 * Retrieve the current count of the underlying event. The programmed
1103 * counters hold a difference from the return value from this function
1104 */
1105 uint64_t (*get_count)(CPUARMState *);
1106 /*
1107 * Return how many nanoseconds it will take (at a minimum) for count events
1108 * to occur. A negative value indicates the counter will never overflow, or
1109 * that the counter has otherwise arranged for the overflow bit to be set
1110 * and the PMU interrupt to be raised on overflow.
1111 */
1112 int64_t (*ns_per_count)(uint64_t);
1113 } pm_event;
1114
1115 static bool event_always_supported(CPUARMState *env)
1116 {
1117 return true;
1118 }
1119
1120 static uint64_t swinc_get_count(CPUARMState *env)
1121 {
1122 /*
1123 * SW_INCR events are written directly to the pmevcntr's by writes to
1124 * PMSWINC, so there is no underlying count maintained by the PMU itself
1125 */
1126 return 0;
1127 }
1128
1129 static int64_t swinc_ns_per(uint64_t ignored)
1130 {
1131 return -1;
1132 }
1133
1134 /*
1135 * Return the underlying cycle count for the PMU cycle counters. If we're in
1136 * usermode, simply return 0.
1137 */
1138 static uint64_t cycles_get_count(CPUARMState *env)
1139 {
1140 #ifndef CONFIG_USER_ONLY
1141 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1142 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1143 #else
1144 return cpu_get_host_ticks();
1145 #endif
1146 }
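
/*
 * With the fixed 1 GHz ARM_CPU_FREQ above, this maps one virtual
 * nanosecond to one cycle; the muldiv64() form keeps the conversion
 * correct should the frequency ever become configurable.
 */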
1147
1148 #ifndef CONFIG_USER_ONLY
1149 static int64_t cycles_ns_per(uint64_t cycles)
1150 {
1151 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1152 }
1153
1154 static bool instructions_supported(CPUARMState *env)
1155 {
1156 return use_icount == 1 /* Precise instruction counting */;
1157 }
1158
1159 static uint64_t instructions_get_count(CPUARMState *env)
1160 {
1161 return (uint64_t)cpu_get_icount_raw();
1162 }
1163
1164 static int64_t instructions_ns_per(uint64_t icount)
1165 {
1166 return cpu_icount_to_ns((int64_t)icount);
1167 }
1168 #endif
1169
1170 static bool pmu_8_1_events_supported(CPUARMState *env)
1171 {
1172 /* For events which are supported in any v8.1 PMU */
1173 return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
1174 }
1175
1176 static bool pmu_8_4_events_supported(CPUARMState *env)
1177 {
1178 /* For events which are supported in any v8.4 PMU */
1179 return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
1180 }
1181
1182 static uint64_t zero_event_get_count(CPUARMState *env)
1183 {
1184 /* Events which never fire on QEMU, so their count is always zero */
1185 return 0;
1186 }
1187
1188 static int64_t zero_event_ns_per(uint64_t cycles)
1189 {
1190 /* An event which never fires can never overflow */
1191 return -1;
1192 }
1193
1194 static const pm_event pm_events[] = {
1195 { .number = 0x000, /* SW_INCR */
1196 .supported = event_always_supported,
1197 .get_count = swinc_get_count,
1198 .ns_per_count = swinc_ns_per,
1199 },
1200 #ifndef CONFIG_USER_ONLY
1201 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1202 .supported = instructions_supported,
1203 .get_count = instructions_get_count,
1204 .ns_per_count = instructions_ns_per,
1205 },
1206 { .number = 0x011, /* CPU_CYCLES, Cycle */
1207 .supported = event_always_supported,
1208 .get_count = cycles_get_count,
1209 .ns_per_count = cycles_ns_per,
1210 },
1211 #endif
1212 { .number = 0x023, /* STALL_FRONTEND */
1213 .supported = pmu_8_1_events_supported,
1214 .get_count = zero_event_get_count,
1215 .ns_per_count = zero_event_ns_per,
1216 },
1217 { .number = 0x024, /* STALL_BACKEND */
1218 .supported = pmu_8_1_events_supported,
1219 .get_count = zero_event_get_count,
1220 .ns_per_count = zero_event_ns_per,
1221 },
1222 { .number = 0x03c, /* STALL */
1223 .supported = pmu_8_4_events_supported,
1224 .get_count = zero_event_get_count,
1225 .ns_per_count = zero_event_ns_per,
1226 },
1227 };
1228
1229 /*
1230 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1231 * events (i.e. the statistical profiling extension), this implementation
1232 * should first be updated to something sparse instead of the current
1233 * supported_event_map[] array.
1234 */
1235 #define MAX_EVENT_ID 0x3c
1236 #define UNSUPPORTED_EVENT UINT16_MAX
1237 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1238
1239 /*
1240 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1241 * of ARM event numbers to indices in our pm_events array.
1242 *
1243 * Note: Events in the 0x40XX range are not currently supported.
1244 */
1245 void pmu_init(ARMCPU *cpu)
1246 {
1247 unsigned int i;
1248
1249 /*
1250 * Empty supported_event_map and cpu->pmceid[01] before adding supported
1251 * events to them
1252 */
1253 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1254 supported_event_map[i] = UNSUPPORTED_EVENT;
1255 }
1256 cpu->pmceid0 = 0;
1257 cpu->pmceid1 = 0;
1258
1259 for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1260 const pm_event *cnt = &pm_events[i];
1261 assert(cnt->number <= MAX_EVENT_ID);
1262 /* We do not currently support events in the 0x40xx range */
1263 assert(cnt->number <= 0x3f);
1264
1265 if (cnt->supported(&cpu->env)) {
1266 supported_event_map[cnt->number] = i;
1267 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1268 if (cnt->number & 0x20) {
1269 cpu->pmceid1 |= event_mask;
1270 } else {
1271 cpu->pmceid0 |= event_mask;
1272 }
1273 }
1274 }
1275 }
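
/*
 * Worked example of the PMCEID bit assignment above: event 0x011
 * (CPU_CYCLES) has bit 0x20 clear, so it sets bit 17 of PMCEID0, while
 * event 0x023 (STALL_FRONTEND) has bit 0x20 set and so sets bit
 * (0x23 & 0x1f) == 3 of PMCEID1.
 */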
1276
1277 /*
1278 * Check at runtime whether a PMU event is supported for the current machine
1279 */
1280 static bool event_supported(uint16_t number)
1281 {
1282 if (number > MAX_EVENT_ID) {
1283 return false;
1284 }
1285 return supported_event_map[number] != UNSUPPORTED_EVENT;
1286 }
1287
1288 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1289 bool isread)
1290 {
1291 /* User accessibility of the performance monitor registers is controlled
1292 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1293 * trapping to EL2 or EL3 for other accesses.
1294 */
1295 int el = arm_current_el(env);
1296
1297 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1298 return CP_ACCESS_TRAP;
1299 }
1300 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1301 && !arm_is_secure_below_el3(env)) {
1302 return CP_ACCESS_TRAP_EL2;
1303 }
1304 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1305 return CP_ACCESS_TRAP_EL3;
1306 }
1307
1308 return CP_ACCESS_OK;
1309 }
1310
1311 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1312 const ARMCPRegInfo *ri,
1313 bool isread)
1314 {
1315 /* ER: event counter read trap control */
1316 if (arm_feature(env, ARM_FEATURE_V8)
1317 && arm_current_el(env) == 0
1318 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1319 && isread) {
1320 return CP_ACCESS_OK;
1321 }
1322
1323 return pmreg_access(env, ri, isread);
1324 }
1325
1326 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1327 const ARMCPRegInfo *ri,
1328 bool isread)
1329 {
1330 /* SW: software increment write trap control */
1331 if (arm_feature(env, ARM_FEATURE_V8)
1332 && arm_current_el(env) == 0
1333 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1334 && !isread) {
1335 return CP_ACCESS_OK;
1336 }
1337
1338 return pmreg_access(env, ri, isread);
1339 }
1340
1341 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1342 const ARMCPRegInfo *ri,
1343 bool isread)
1344 {
1345 /* ER: event counter read trap control */
1346 if (arm_feature(env, ARM_FEATURE_V8)
1347 && arm_current_el(env) == 0
1348 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1349 return CP_ACCESS_OK;
1350 }
1351
1352 return pmreg_access(env, ri, isread);
1353 }
1354
1355 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1356 const ARMCPRegInfo *ri,
1357 bool isread)
1358 {
1359 /* CR: cycle counter read trap control */
1360 if (arm_feature(env, ARM_FEATURE_V8)
1361 && arm_current_el(env) == 0
1362 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1363 && isread) {
1364 return CP_ACCESS_OK;
1365 }
1366
1367 return pmreg_access(env, ri, isread);
1368 }
1369
1370 /* Returns true if the counter (pass 31 for PMCCNTR) should count events, given
1371  * the current EL, security state, and register configuration.
1372 */
1373 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1374 {
1375 uint64_t filter;
1376 bool e, p, u, nsk, nsu, nsh, m;
1377 bool enabled, prohibited, filtered;
1378 bool secure = arm_is_secure(env);
1379 int el = arm_current_el(env);
1380 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1381
1382 if (!arm_feature(env, ARM_FEATURE_PMU)) {
1383 return false;
1384 }
1385
1386 if (!arm_feature(env, ARM_FEATURE_EL2) ||
1387 (counter < hpmn || counter == 31)) {
1388 e = env->cp15.c9_pmcr & PMCRE;
1389 } else {
1390 e = env->cp15.mdcr_el2 & MDCR_HPME;
1391 }
1392 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1393
1394 if (!secure) {
1395 if (el == 2 && (counter < hpmn || counter == 31)) {
1396 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1397 } else {
1398 prohibited = false;
1399 }
1400 } else {
1401 prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1402 (env->cp15.mdcr_el3 & MDCR_SPME);
1403 }
1404
1405 if (prohibited && counter == 31) {
1406 prohibited = env->cp15.c9_pmcr & PMCRDP;
1407 }
1408
1409 if (counter == 31) {
1410 filter = env->cp15.pmccfiltr_el0;
1411 } else {
1412 filter = env->cp15.c14_pmevtyper[counter];
1413 }
1414
1415 p = filter & PMXEVTYPER_P;
1416 u = filter & PMXEVTYPER_U;
1417 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1418 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1419 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1420 m = arm_el_is_aa64(env, 1) &&
1421 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1422
1423 if (el == 0) {
1424 filtered = secure ? u : u != nsu;
1425 } else if (el == 1) {
1426 filtered = secure ? p : p != nsk;
1427 } else if (el == 2) {
1428 filtered = !nsh;
1429 } else { /* EL3 */
1430 filtered = m != p;
1431 }
1432
1433 if (counter != 31) {
1434 /*
1435 * If not checking PMCCNTR, ensure the counter is setup to an event we
1436 * support
1437 */
1438 uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1439 if (!event_supported(event)) {
1440 return false;
1441 }
1442 }
1443
1444 return enabled && !prohibited && !filtered;
1445 }
1446
1447 static void pmu_update_irq(CPUARMState *env)
1448 {
1449 ARMCPU *cpu = env_archcpu(env);
1450 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1451 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1452 }
1453
1454 /*
1455 * Ensure c15_ccnt is the guest-visible count so that operations such as
1456 * enabling/disabling the counter or filtering, modifying the count itself,
1457 * etc. can be done logically. This is essentially a no-op if the counter is
1458 * not enabled at the time of the call.
1459 */
1460 static void pmccntr_op_start(CPUARMState *env)
1461 {
1462 uint64_t cycles = cycles_get_count(env);
1463
1464 if (pmu_counter_enabled(env, 31)) {
1465 uint64_t eff_cycles = cycles;
1466 if (env->cp15.c9_pmcr & PMCRD) {
1467 /* Increment once every 64 processor clock cycles */
1468 eff_cycles /= 64;
1469 }
1470
1471 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1472
1473 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
1474 1ull << 63 : 1ull << 31;
1475 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1476 env->cp15.c9_pmovsr |= (1 << 31);
1477 pmu_update_irq(env);
1478 }
1479
1480 env->cp15.c15_ccnt = new_pmccntr;
1481 }
1482 env->cp15.c15_ccnt_delta = cycles;
1483 }
1484
1485 /*
1486 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1487 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1488 * pmccntr_op_start.
1489 */
1490 static void pmccntr_op_finish(CPUARMState *env)
1491 {
1492 if (pmu_counter_enabled(env, 31)) {
1493 #ifndef CONFIG_USER_ONLY
1494 /* Calculate when the counter will next overflow */
1495 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1496 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1497 remaining_cycles = (uint32_t)remaining_cycles;
1498 }
1499 int64_t overflow_in = cycles_ns_per(remaining_cycles);
1500
1501 if (overflow_in > 0) {
1502 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1503 overflow_in;
1504 ARMCPU *cpu = env_archcpu(env);
1505 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1506 }
1507 #endif
1508
1509 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1510 if (env->cp15.c9_pmcr & PMCRD) {
1511 /* Increment once every 64 processor clock cycles */
1512 prev_cycles /= 64;
1513 }
1514 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1515 }
1516 }
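
/*
 * In other words, the cycle counter state is kept as a delta against the
 * free-running cycle source:
 *
 *   guest-visible PMCCNTR = underlying_cycles - c15_ccnt_delta
 *
 * pmccntr_op_start() folds the current underlying count into c15_ccnt so
 * the guest-visible value can be read or written directly, and
 * pmccntr_op_finish() recomputes the delta from the possibly-updated guest
 * value (and, for system emulation, arms the overflow timer).
 */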
1517
1518 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1519 {
1520
1521 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1522 uint64_t count = 0;
1523 if (event_supported(event)) {
1524 uint16_t event_idx = supported_event_map[event];
1525 count = pm_events[event_idx].get_count(env);
1526 }
1527
1528 if (pmu_counter_enabled(env, counter)) {
1529 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1530
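        /* Bit 31 going from 1 to 0 between the old and new values means the
         * 32-bit counter wrapped, so record the overflow for this counter.
         */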
1531 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1532 env->cp15.c9_pmovsr |= (1 << counter);
1533 pmu_update_irq(env);
1534 }
1535 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1536 }
1537 env->cp15.c14_pmevcntr_delta[counter] = count;
1538 }
1539
1540 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1541 {
1542 if (pmu_counter_enabled(env, counter)) {
1543 #ifndef CONFIG_USER_ONLY
1544 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1545 uint16_t event_idx = supported_event_map[event];
1546 uint64_t delta = UINT32_MAX -
1547 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1548 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1549
1550 if (overflow_in > 0) {
1551 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1552 overflow_in;
1553 ARMCPU *cpu = env_archcpu(env);
1554 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1555 }
1556 #endif
1557
1558 env->cp15.c14_pmevcntr_delta[counter] -=
1559 env->cp15.c14_pmevcntr[counter];
1560 }
1561 }
1562
1563 void pmu_op_start(CPUARMState *env)
1564 {
1565 unsigned int i;
1566 pmccntr_op_start(env);
1567 for (i = 0; i < pmu_num_counters(env); i++) {
1568 pmevcntr_op_start(env, i);
1569 }
1570 }
1571
1572 void pmu_op_finish(CPUARMState *env)
1573 {
1574 unsigned int i;
1575 pmccntr_op_finish(env);
1576 for (i = 0; i < pmu_num_counters(env); i++) {
1577 pmevcntr_op_finish(env, i);
1578 }
1579 }
1580
1581 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1582 {
1583 pmu_op_start(&cpu->env);
1584 }
1585
1586 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1587 {
1588 pmu_op_finish(&cpu->env);
1589 }
1590
1591 void arm_pmu_timer_cb(void *opaque)
1592 {
1593 ARMCPU *cpu = opaque;
1594
1595 /*
1596 * Update all the counter values based on the current underlying counts,
1597 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1598 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1599 * counter may expire.
1600 */
1601 pmu_op_start(&cpu->env);
1602 pmu_op_finish(&cpu->env);
1603 }
1604
1605 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1606 uint64_t value)
1607 {
1608 pmu_op_start(env);
1609
1610 if (value & PMCRC) {
1611 /* The counter has been reset */
1612 env->cp15.c15_ccnt = 0;
1613 }
1614
1615 if (value & PMCRP) {
1616 unsigned int i;
1617 for (i = 0; i < pmu_num_counters(env); i++) {
1618 env->cp15.c14_pmevcntr[i] = 0;
1619 }
1620 }
1621
1622 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
1623 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
1624
1625 pmu_op_finish(env);
1626 }
1627
1628 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1629 uint64_t value)
1630 {
1631 unsigned int i;
1632 for (i = 0; i < pmu_num_counters(env); i++) {
1633 /* Increment a counter's count iff: */
1634 if ((value & (1 << i)) && /* counter's bit is set */
1635 /* counter is enabled and not filtered */
1636 pmu_counter_enabled(env, i) &&
1637 /* counter is SW_INCR */
1638 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1639 pmevcntr_op_start(env, i);
1640
1641 /*
1642 * Detect if this write causes an overflow since we can't predict
1643 * PMSWINC overflows like we can for other events
1644 */
1645 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1646
1647 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1648 env->cp15.c9_pmovsr |= (1 << i);
1649 pmu_update_irq(env);
1650 }
1651
1652 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1653
1654 pmevcntr_op_finish(env, i);
1655 }
1656 }
1657 }
1658
1659 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1660 {
1661 uint64_t ret;
1662 pmccntr_op_start(env);
1663 ret = env->cp15.c15_ccnt;
1664 pmccntr_op_finish(env);
1665 return ret;
1666 }
1667
1668 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1669 uint64_t value)
1670 {
1671 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1672  * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
1673  * PMSELR.SEL value is then checked when PMXEVTYPER and PMXEVCNTR are
1674 * accessed.
1675 */
1676 env->cp15.c9_pmselr = value & 0x1f;
1677 }
1678
1679 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1680 uint64_t value)
1681 {
1682 pmccntr_op_start(env);
1683 env->cp15.c15_ccnt = value;
1684 pmccntr_op_finish(env);
1685 }
1686
1687 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1688 uint64_t value)
1689 {
1690 uint64_t cur_val = pmccntr_read(env, NULL);
1691
1692 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1693 }
1694
1695 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1696 uint64_t value)
1697 {
1698 pmccntr_op_start(env);
1699 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1700 pmccntr_op_finish(env);
1701 }
1702
1703 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1704 uint64_t value)
1705 {
1706 pmccntr_op_start(env);
1707 /* M is not accessible from AArch32 */
1708 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1709 (value & PMCCFILTR);
1710 pmccntr_op_finish(env);
1711 }
1712
1713 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1714 {
1715 /* M is not visible in AArch32 */
1716 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1717 }
1718
1719 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1720 uint64_t value)
1721 {
1722 value &= pmu_counter_mask(env);
1723 env->cp15.c9_pmcnten |= value;
1724 }
1725
1726 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1727 uint64_t value)
1728 {
1729 value &= pmu_counter_mask(env);
1730 env->cp15.c9_pmcnten &= ~value;
1731 }
1732
1733 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1734 uint64_t value)
1735 {
1736 value &= pmu_counter_mask(env);
1737 env->cp15.c9_pmovsr &= ~value;
1738 pmu_update_irq(env);
1739 }
1740
1741 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1742 uint64_t value)
1743 {
1744 value &= pmu_counter_mask(env);
1745 env->cp15.c9_pmovsr |= value;
1746 pmu_update_irq(env);
1747 }
1748
1749 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1750 uint64_t value, const uint8_t counter)
1751 {
1752 if (counter == 31) {
1753 pmccfiltr_write(env, ri, value);
1754 } else if (counter < pmu_num_counters(env)) {
1755 pmevcntr_op_start(env, counter);
1756
1757 /*
1758 * If this counter's event type is changing, store the current
1759 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1760 * pmevcntr_op_finish has the correct baseline when it converts back to
1761 * a delta.
1762 */
1763 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1764 PMXEVTYPER_EVTCOUNT;
1765 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1766 if (old_event != new_event) {
1767 uint64_t count = 0;
1768 if (event_supported(new_event)) {
1769 uint16_t event_idx = supported_event_map[new_event];
1770 count = pm_events[event_idx].get_count(env);
1771 }
1772 env->cp15.c14_pmevcntr_delta[counter] = count;
1773 }
1774
1775 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1776 pmevcntr_op_finish(env, counter);
1777 }
1778 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1779 * PMSELR value is equal to or greater than the number of implemented
1780 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1781 */
1782 }
1783
1784 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1785 const uint8_t counter)
1786 {
1787 if (counter == 31) {
1788 return env->cp15.pmccfiltr_el0;
1789 } else if (counter < pmu_num_counters(env)) {
1790 return env->cp15.c14_pmevtyper[counter];
1791 } else {
1792 /*
1793 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1794 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1795 */
1796 return 0;
1797 }
1798 }
1799
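/*
 * For the individual PMEVTYPER<n>/PMEVCNTR<n> registers the counter index
 * n is encoded in the instruction as CRm[1:0]:opc2[2:0], which is what the
 * ((ri->crm & 3) << 3) | (ri->opc2 & 7) expression below reconstructs.
 */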
1800 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1801 uint64_t value)
1802 {
1803 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1804 pmevtyper_write(env, ri, value, counter);
1805 }
1806
1807 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1808 uint64_t value)
1809 {
1810 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1811 env->cp15.c14_pmevtyper[counter] = value;
1812
1813 /*
1814 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1815 * pmu_op_finish calls when loading saved state for a migration. Because
1816 * we're potentially updating the type of event here, the value written to
1817 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1818 * different counter type. Therefore, we need to set this value to the
1819 * current count for the counter type we're writing so that pmu_op_finish
1820 * has the correct count for its calculation.
1821 */
1822 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1823 if (event_supported(event)) {
1824 uint16_t event_idx = supported_event_map[event];
1825 env->cp15.c14_pmevcntr_delta[counter] =
1826 pm_events[event_idx].get_count(env);
1827 }
1828 }
1829
1830 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1831 {
1832 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1833 return pmevtyper_read(env, ri, counter);
1834 }
1835
1836 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1837 uint64_t value)
1838 {
1839 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1840 }
1841
1842 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1843 {
1844 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1845 }
1846
1847 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1848 uint64_t value, uint8_t counter)
1849 {
1850 if (counter < pmu_num_counters(env)) {
1851 pmevcntr_op_start(env, counter);
1852 env->cp15.c14_pmevcntr[counter] = value;
1853 pmevcntr_op_finish(env, counter);
1854 }
1855 /*
1856 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1857 * are CONSTRAINED UNPREDICTABLE.
1858 */
1859 }
1860
1861 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1862 uint8_t counter)
1863 {
1864 if (counter < pmu_num_counters(env)) {
1865 uint64_t ret;
1866 pmevcntr_op_start(env, counter);
1867 ret = env->cp15.c14_pmevcntr[counter];
1868 pmevcntr_op_finish(env, counter);
1869 return ret;
1870 } else {
1871 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1872 * are CONSTRAINED UNPREDICTABLE. */
1873 return 0;
1874 }
1875 }
1876
1877 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1878 uint64_t value)
1879 {
1880 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1881 pmevcntr_write(env, ri, value, counter);
1882 }
1883
1884 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1885 {
1886 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1887 return pmevcntr_read(env, ri, counter);
1888 }
1889
1890 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1891 uint64_t value)
1892 {
1893 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1894 assert(counter < pmu_num_counters(env));
1895 env->cp15.c14_pmevcntr[counter] = value;
1896 pmevcntr_write(env, ri, value, counter);
1897 }
1898
1899 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1900 {
1901 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1902 assert(counter < pmu_num_counters(env));
1903 return env->cp15.c14_pmevcntr[counter];
1904 }
1905
1906 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1907 uint64_t value)
1908 {
1909 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1910 }
1911
1912 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1913 {
1914 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1915 }
1916
1917 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1918 uint64_t value)
1919 {
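/* ARMv8 PMUSERENR has EN, SW, CR and ER bits; earlier versions define only EN. */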
1920 if (arm_feature(env, ARM_FEATURE_V8)) {
1921 env->cp15.c9_pmuserenr = value & 0xf;
1922 } else {
1923 env->cp15.c9_pmuserenr = value & 1;
1924 }
1925 }
1926
1927 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1928 uint64_t value)
1929 {
1930 /* Only bits corresponding to implemented counters (and the C bit) can be set */
1931 value &= pmu_counter_mask(env);
1932 env->cp15.c9_pminten |= value;
1933 pmu_update_irq(env);
1934 }
1935
1936 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1937 uint64_t value)
1938 {
1939 value &= pmu_counter_mask(env);
1940 env->cp15.c9_pminten &= ~value;
1941 pmu_update_irq(env);
1942 }
1943
1944 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1945 uint64_t value)
1946 {
1947 /* Note that even though the AArch64 view of this register has bits
1948 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1949 * architectural requirements for bits which are RES0 only in some
1950 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1951 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1952 */
1953 raw_write(env, ri, value & ~0x1FULL);
1954 }
1955
1956 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1957 {
1958 /* Begin with base v8.0 state. */
1959 uint32_t valid_mask = 0x3fff;
1960 ARMCPU *cpu = env_archcpu(env);
1961
1962 if (arm_el_is_aa64(env, 3)) {
1963 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
1964 valid_mask &= ~SCR_NET;
1965 } else {
1966 valid_mask &= ~(SCR_RW | SCR_ST);
1967 }
1968
1969 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1970 valid_mask &= ~SCR_HCE;
1971
1972 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1973 * supported if EL2 exists. The bit is UNK/SBZP when
1974 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1975 * when EL2 is unavailable.
1976 * On ARMv8, this bit is always available.
1977 */
1978 if (arm_feature(env, ARM_FEATURE_V7) &&
1979 !arm_feature(env, ARM_FEATURE_V8)) {
1980 valid_mask &= ~SCR_SMD;
1981 }
1982 }
1983 if (cpu_isar_feature(aa64_lor, cpu)) {
1984 valid_mask |= SCR_TLOR;
1985 }
1986 if (cpu_isar_feature(aa64_pauth, cpu)) {
1987 valid_mask |= SCR_API | SCR_APK;
1988 }
1989
1990 /* Clear all-context RES0 bits. */
1991 value &= valid_mask;
1992 raw_write(env, ri, value);
1993 }
1994
1995 static CPAccessResult access_aa64_tid2(CPUARMState *env,
1996 const ARMCPRegInfo *ri,
1997 bool isread)
1998 {
1999 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
2000 return CP_ACCESS_TRAP_EL2;
2001 }
2002
2003 return CP_ACCESS_OK;
2004 }
2005
2006 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2007 {
2008 ARMCPU *cpu = env_archcpu(env);
2009
2010 /* Acquire the CSSELR index from the bank corresponding to the
2011 * security state of the CCSIDR access
2012 */
2013 uint32_t index = A32_BANKED_REG_GET(env, csselr,
2014 ri->secure & ARM_CP_SECSTATE_S);
2015
2016 return cpu->ccsidr[index];
2017 }
2018
2019 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2020 uint64_t value)
2021 {
2022 raw_write(env, ri, value & 0xf);
2023 }
2024
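/*
 * ISR_EL1/ISR: report the pending IRQ/FIQ state. When HCR_EL2.IMO/FMO
 * route interrupts to EL2 for this EL1 context, the virtual interrupt
 * lines are reported instead of the physical ones.
 */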
2025 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2026 {
2027 CPUState *cs = env_cpu(env);
2028 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
2029 uint64_t ret = 0;
2030 bool allow_virt = (arm_current_el(env) == 1 &&
2031 (!arm_is_secure_below_el3(env) ||
2032 (env->cp15.scr_el3 & SCR_EEL2)));
2033
2034 if (allow_virt && (hcr_el2 & HCR_IMO)) {
2035 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2036 ret |= CPSR_I;
2037 }
2038 } else {
2039 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2040 ret |= CPSR_I;
2041 }
2042 }
2043
2044 if (allow_virt && (hcr_el2 & HCR_FMO)) {
2045 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2046 ret |= CPSR_F;
2047 }
2048 } else {
2049 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2050 ret |= CPSR_F;
2051 }
2052 }
2053
2054 /* External aborts are not possible in QEMU so A bit is always clear */
2055 return ret;
2056 }
2057
2058 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2059 bool isread)
2060 {
2061 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2062 return CP_ACCESS_TRAP_EL2;
2063 }
2064
2065 return CP_ACCESS_OK;
2066 }
2067
2068 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2069 bool isread)
2070 {
2071 if (arm_feature(env, ARM_FEATURE_V8)) {
2072 return access_aa64_tid1(env, ri, isread);
2073 }
2074
2075 return CP_ACCESS_OK;
2076 }
2077
2078 static const ARMCPRegInfo v7_cp_reginfo[] = {
2079 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2080 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2081 .access = PL1_W, .type = ARM_CP_NOP },
2082 /* Performance monitors are implementation defined in v7,
2083 * but with an ARM recommended set of registers, which we
2084 * follow.
2085 *
2086 * Performance registers fall into three categories:
2087 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2088 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2089 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2090 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2091 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2092 */
2093 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2094 .access = PL0_RW, .type = ARM_CP_ALIAS,
2095 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2096 .writefn = pmcntenset_write,
2097 .accessfn = pmreg_access,
2098 .raw_writefn = raw_write },
2099 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2100 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2101 .access = PL0_RW, .accessfn = pmreg_access,
2102 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2103 .writefn = pmcntenset_write, .raw_writefn = raw_write },
2104 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2105 .access = PL0_RW,
2106 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2107 .accessfn = pmreg_access,
2108 .writefn = pmcntenclr_write,
2109 .type = ARM_CP_ALIAS },
2110 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2111 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2112 .access = PL0_RW, .accessfn = pmreg_access,
2113 .type = ARM_CP_ALIAS,
2114 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2115 .writefn = pmcntenclr_write },
2116 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2117 .access = PL0_RW, .type = ARM_CP_IO,
2118 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2119 .accessfn = pmreg_access,
2120 .writefn = pmovsr_write,
2121 .raw_writefn = raw_write },
2122 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2123 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2124 .access = PL0_RW, .accessfn = pmreg_access,
2125 .type = ARM_CP_ALIAS | ARM_CP_IO,
2126 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2127 .writefn = pmovsr_write,
2128 .raw_writefn = raw_write },
2129 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2130 .access = PL0_W, .accessfn = pmreg_access_swinc,
2131 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2132 .writefn = pmswinc_write },
2133 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2134 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2135 .access = PL0_W, .accessfn = pmreg_access_swinc,
2136 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2137 .writefn = pmswinc_write },
2138 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2139 .access = PL0_RW, .type = ARM_CP_ALIAS,
2140 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2141 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2142 .raw_writefn = raw_write},
2143 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2144 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2145 .access = PL0_RW, .accessfn = pmreg_access_selr,
2146 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2147 .writefn = pmselr_write, .raw_writefn = raw_write, },
2148 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2149 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2150 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2151 .accessfn = pmreg_access_ccntr },
2152 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2153 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2154 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2155 .type = ARM_CP_IO,
2156 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2157 .readfn = pmccntr_read, .writefn = pmccntr_write,
2158 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2159 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2160 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2161 .access = PL0_RW, .accessfn = pmreg_access,
2162 .type = ARM_CP_ALIAS | ARM_CP_IO,
2163 .resetvalue = 0, },
2164 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2165 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2166 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2167 .access = PL0_RW, .accessfn = pmreg_access,
2168 .type = ARM_CP_IO,
2169 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2170 .resetvalue = 0, },
2171 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2172 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2173 .accessfn = pmreg_access,
2174 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2175 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2176 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2177 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2178 .accessfn = pmreg_access,
2179 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2180 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2181 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2182 .accessfn = pmreg_access_xevcntr,
2183 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2184 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2185 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2186 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2187 .accessfn = pmreg_access_xevcntr,
2188 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2189 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2190 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2191 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2192 .resetvalue = 0,
2193 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2194 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2195 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2196 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2197 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2198 .resetvalue = 0,
2199 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2200 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2201 .access = PL1_RW, .accessfn = access_tpm,
2202 .type = ARM_CP_ALIAS | ARM_CP_IO,
2203 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2204 .resetvalue = 0,
2205 .writefn = pmintenset_write, .raw_writefn = raw_write },
2206 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2207 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2208 .access = PL1_RW, .accessfn = access_tpm,
2209 .type = ARM_CP_IO,
2210 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2211 .writefn = pmintenset_write, .raw_writefn = raw_write,
2212 .resetvalue = 0x0 },
2213 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2214 .access = PL1_RW, .accessfn = access_tpm,
2215 .type = ARM_CP_ALIAS | ARM_CP_IO,
2216 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2217 .writefn = pmintenclr_write, },
2218 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2219 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2220 .access = PL1_RW, .accessfn = access_tpm,
2221 .type = ARM_CP_ALIAS | ARM_CP_IO,
2222 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2223 .writefn = pmintenclr_write },
2224 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2225 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2226 .access = PL1_R,
2227 .accessfn = access_aa64_tid2,
2228 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2229 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2230 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2231 .access = PL1_RW,
2232 .accessfn = access_aa64_tid2,
2233 .writefn = csselr_write, .resetvalue = 0,
2234 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2235 offsetof(CPUARMState, cp15.csselr_ns) } },
2236 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2237 * just RAZ for all cores:
2238 */
2239 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2240 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2241 .access = PL1_R, .type = ARM_CP_CONST,
2242 .accessfn = access_aa64_tid1,
2243 .resetvalue = 0 },
2244 /* Auxiliary fault status registers: these also are IMPDEF, and we
2245 * choose to RAZ/WI for all cores.
2246 */
2247 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2248 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2249 .access = PL1_RW, .accessfn = access_tvm_trvm,
2250 .type = ARM_CP_CONST, .resetvalue = 0 },
2251 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2252 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2253 .access = PL1_RW, .accessfn = access_tvm_trvm,
2254 .type = ARM_CP_CONST, .resetvalue = 0 },
2255 /* MAIR can just read-as-written because we don't implement caches
2256 * and so don't need to care about memory attributes.
2257 */
2258 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2259 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2260 .access = PL1_RW, .accessfn = access_tvm_trvm,
2261 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2262 .resetvalue = 0 },
2263 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2264 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2265 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2266 .resetvalue = 0 },
2267 /* For non-long-descriptor page tables these are PRRR and NMRR;
2268 * regardless they still act as reads-as-written for QEMU.
2269 */
2270 /* MAIR0/1 are defined separately from their 64-bit counterpart so that
2271 * each can be given the correct fieldoffset; endianness is handled
2272 * in the field definitions.
2273 */
2274 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2275 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2276 .access = PL1_RW, .accessfn = access_tvm_trvm,
2277 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2278 offsetof(CPUARMState, cp15.mair0_ns) },
2279 .resetfn = arm_cp_reset_ignore },
2280 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2281 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2282 .access = PL1_RW, .accessfn = access_tvm_trvm,
2283 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2284 offsetof(CPUARMState, cp15.mair1_ns) },
2285 .resetfn = arm_cp_reset_ignore },
2286 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2287 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2288 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2289 /* 32 bit ITLB invalidates */
2290 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2291 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2292 .writefn = tlbiall_write },
2293 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2294 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2295 .writefn = tlbimva_write },
2296 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2297 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2298 .writefn = tlbiasid_write },
2299 /* 32 bit DTLB invalidates */
2300 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2301 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2302 .writefn = tlbiall_write },
2303 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2304 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2305 .writefn = tlbimva_write },
2306 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2307 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2308 .writefn = tlbiasid_write },
2309 /* 32 bit TLB invalidates */
2310 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2311 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2312 .writefn = tlbiall_write },
2313 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2314 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2315 .writefn = tlbimva_write },
2316 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2317 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2318 .writefn = tlbiasid_write },
2319 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2320 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2321 .writefn = tlbimvaa_write },
2322 REGINFO_SENTINEL
2323 };
2324
2325 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2326 /* 32 bit TLB invalidates, Inner Shareable */
2327 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2328 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2329 .writefn = tlbiall_is_write },
2330 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2331 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2332 .writefn = tlbimva_is_write },
2333 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2334 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2335 .writefn = tlbiasid_is_write },
2336 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2337 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2338 .writefn = tlbimvaa_is_write },
2339 REGINFO_SENTINEL
2340 };
2341
2342 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2343 /* PMOVSSET is not implemented in v7 before v7ve */
2344 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2345 .access = PL0_RW, .accessfn = pmreg_access,
2346 .type = ARM_CP_ALIAS | ARM_CP_IO,
2347 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2348 .writefn = pmovsset_write,
2349 .raw_writefn = raw_write },
2350 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2351 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2352 .access = PL0_RW, .accessfn = pmreg_access,
2353 .type = ARM_CP_ALIAS | ARM_CP_IO,
2354 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2355 .writefn = pmovsset_write,
2356 .raw_writefn = raw_write },
2357 REGINFO_SENTINEL
2358 };
2359
2360 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2361 uint64_t value)
2362 {
2363 value &= 1;
2364 env->teecr = value;
2365 }
2366
2367 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2368 bool isread)
2369 {
2370 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2371 return CP_ACCESS_TRAP;
2372 }
2373 return CP_ACCESS_OK;
2374 }
2375
2376 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2377 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2378 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2379 .resetvalue = 0,
2380 .writefn = teecr_write },
2381 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2382 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2383 .accessfn = teehbr_access, .resetvalue = 0 },
2384 REGINFO_SENTINEL
2385 };
2386
2387 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2388 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2389 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2390 .access = PL0_RW,
2391 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2392 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2393 .access = PL0_RW,
2394 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2395 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2396 .resetfn = arm_cp_reset_ignore },
2397 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2398 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2399 .access = PL0_R|PL1_W,
2400 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2401 .resetvalue = 0},
2402 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2403 .access = PL0_R|PL1_W,
2404 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2405 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2406 .resetfn = arm_cp_reset_ignore },
2407 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2408 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2409 .access = PL1_RW,
2410 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2411 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2412 .access = PL1_RW,
2413 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2414 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2415 .resetvalue = 0 },
2416 REGINFO_SENTINEL
2417 };
2418
2419 #ifndef CONFIG_USER_ONLY
2420
2421 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2422 bool isread)
2423 {
2424 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2425 * Writable only at the highest implemented exception level.
2426 */
2427 int el = arm_current_el(env);
2428 uint64_t hcr;
2429 uint32_t cntkctl;
2430
2431 switch (el) {
2432 case 0:
2433 hcr = arm_hcr_el2_eff(env);
2434 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2435 cntkctl = env->cp15.cnthctl_el2;
2436 } else {
2437 cntkctl = env->cp15.c14_cntkctl;
2438 }
2439 if (!extract32(cntkctl, 0, 2)) {
2440 return CP_ACCESS_TRAP;
2441 }
2442 break;
2443 case 1:
2444 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2445 arm_is_secure_below_el3(env)) {
2446 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2447 return CP_ACCESS_TRAP_UNCATEGORIZED;
2448 }
2449 break;
2450 case 2:
2451 case 3:
2452 break;
2453 }
2454
2455 if (!isread && el < arm_highest_el(env)) {
2456 return CP_ACCESS_TRAP_UNCATEGORIZED;
2457 }
2458
2459 return CP_ACCESS_OK;
2460 }
2461
2462 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2463 bool isread)
2464 {
2465 unsigned int cur_el = arm_current_el(env);
2466 bool secure = arm_is_secure(env);
2467 uint64_t hcr = arm_hcr_el2_eff(env);
2468
2469 switch (cur_el) {
2470 case 0:
2471 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2472 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2473 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2474 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2475 }
2476
2477 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2478 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2479 return CP_ACCESS_TRAP;
2480 }
2481
2482 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2483 if (hcr & HCR_E2H) {
2484 if (timeridx == GTIMER_PHYS &&
2485 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2486 return CP_ACCESS_TRAP_EL2;
2487 }
2488 } else {
2489 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2490 if (arm_feature(env, ARM_FEATURE_EL2) &&
2491 timeridx == GTIMER_PHYS && !secure &&
2492 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2493 return CP_ACCESS_TRAP_EL2;
2494 }
2495 }
2496 break;
2497
2498 case 1:
2499 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2500 if (arm_feature(env, ARM_FEATURE_EL2) &&
2501 timeridx == GTIMER_PHYS && !secure &&
2502 (hcr & HCR_E2H
2503 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2504 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2505 return CP_ACCESS_TRAP_EL2;
2506 }
2507 break;
2508 }
2509 return CP_ACCESS_OK;
2510 }
2511
2512 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2513 bool isread)
2514 {
2515 unsigned int cur_el = arm_current_el(env);
2516 bool secure = arm_is_secure(env);
2517 uint64_t hcr = arm_hcr_el2_eff(env);
2518
2519 switch (cur_el) {
2520 case 0:
2521 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2522 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2523 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2524 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2525 }
2526
2527 /*
2528 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2529 * EL0 if EL0[PV]TEN is zero.
2530 */
2531 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2532 return CP_ACCESS_TRAP;
2533 }
2534 /* fall through */
2535
2536 case 1:
2537 if (arm_feature(env, ARM_FEATURE_EL2) &&
2538 timeridx == GTIMER_PHYS && !secure) {
2539 if (hcr & HCR_E2H) {
2540 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2541 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2542 return CP_ACCESS_TRAP_EL2;
2543 }
2544 } else {
2545 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2546 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2547 return CP_ACCESS_TRAP_EL2;
2548 }
2549 }
2550 }
2551 break;
2552 }
2553 return CP_ACCESS_OK;
2554 }
2555
2556 static CPAccessResult gt_pct_access(CPUARMState *env,
2557 const ARMCPRegInfo *ri,
2558 bool isread)
2559 {
2560 return gt_counter_access(env, GTIMER_PHYS, isread);
2561 }
2562
2563 static CPAccessResult gt_vct_access(CPUARMState *env,
2564 const ARMCPRegInfo *ri,
2565 bool isread)
2566 {
2567 return gt_counter_access(env, GTIMER_VIRT, isread);
2568 }
2569
2570 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2571 bool isread)
2572 {
2573 return gt_timer_access(env, GTIMER_PHYS, isread);
2574 }
2575
2576 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2577 bool isread)
2578 {
2579 return gt_timer_access(env, GTIMER_VIRT, isread);
2580 }
2581
2582 static CPAccessResult gt_stimer_access(CPUARMState *env,
2583 const ARMCPRegInfo *ri,
2584 bool isread)
2585 {
2586 /* The AArch64 register view of the secure physical timer is
2587 * always accessible from EL3, and configurably accessible from
2588 * Secure EL1.
2589 */
2590 switch (arm_current_el(env)) {
2591 case 1:
2592 if (!arm_is_secure(env)) {
2593 return CP_ACCESS_TRAP;
2594 }
2595 if (!(env->cp15.scr_el3 & SCR_ST)) {
2596 return CP_ACCESS_TRAP_EL3;
2597 }
2598 return CP_ACCESS_OK;
2599 case 0:
2600 case 2:
2601 return CP_ACCESS_TRAP;
2602 case 3:
2603 return CP_ACCESS_OK;
2604 default:
2605 g_assert_not_reached();
2606 }
2607 }
2608
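/* Convert the QEMU virtual clock (in ns) into generic timer ticks at the CPU's CNTFRQ. */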
2609 static uint64_t gt_get_countervalue(CPUARMState *env)
2610 {
2611 ARMCPU *cpu = env_archcpu(env);
2612
2613 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2614 }
2615
2616 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2617 {
2618 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2619
2620 if (gt->ctl & 1) {
2621 /* Timer enabled: calculate and set current ISTATUS, irq, and
2622 * reset timer to when ISTATUS next has to change
2623 */
2624 uint64_t offset = timeridx == GTIMER_VIRT ?
2625 cpu->env.cp15.cntvoff_el2 : 0;
2626 uint64_t count = gt_get_countervalue(&cpu->env);
2627 /* Note that this must be unsigned 64 bit arithmetic: */
2628 int istatus = count - offset >= gt->cval;
2629 uint64_t nexttick;
2630 int irqstate;
2631
2632 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2633
2634 irqstate = (istatus && !(gt->ctl & 2));
2635 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2636
2637 if (istatus) {
2638 /* Next transition is when count rolls back over to zero */
2639 nexttick = UINT64_MAX;
2640 } else {
2641 /* Next transition is when we hit cval */
2642 nexttick = gt->cval + offset;
2643 }
2644 /* Note that the desired next expiry time might be beyond the
2645 * signed-64-bit range of a QEMUTimer -- in this case we just
2646 * set the timer for as far in the future as possible. When the
2647 * timer expires we will reset the timer for any remaining period.
2648 */
2649 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2650 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2651 } else {
2652 timer_mod(cpu->gt_timer[timeridx], nexttick);
2653 }
2654 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2655 } else {
2656 /* Timer disabled: ISTATUS and timer output always clear */
2657 gt->ctl &= ~4;
2658 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2659 timer_del(cpu->gt_timer[timeridx]);
2660 trace_arm_gt_recalc_disabled(timeridx);
2661 }
2662 }
2663
2664 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2665 int timeridx)
2666 {
2667 ARMCPU *cpu = env_archcpu(env);
2668
2669 timer_del(cpu->gt_timer[timeridx]);
2670 }
2671
2672 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2673 {
2674 return gt_get_countervalue(env);
2675 }
2676
2677 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
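/*
 * Return the offset applied to the virtual counter (CNTVOFF_EL2). It is
 * treated as zero when the access comes from the EL2&0 regime, i.e. at
 * EL2 with HCR_EL2.E2H set, or at EL0 with E2H and TGE both set.
 */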
2678 {
2679 uint64_t hcr;
2680
2681 switch (arm_current_el(env)) {
2682 case 2:
2683 hcr = arm_hcr_el2_eff(env);
2684 if (hcr & HCR_E2H) {
2685 return 0;
2686 }
2687 break;
2688 case 0:
2689 hcr = arm_hcr_el2_eff(env);
2690 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2691 return 0;
2692 }
2693 break;
2694 }
2695
2696 return env->cp15.cntvoff_el2;
2697 }
2698
2699 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2700 {
2701 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2702 }
2703
2704 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2705 int timeridx,
2706 uint64_t value)
2707 {
2708 trace_arm_gt_cval_write(timeridx, value);
2709 env->cp15.c14_timer[timeridx].cval = value;
2710 gt_recalc_timer(env_archcpu(env), timeridx);
2711 }
2712
2713 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2714 int timeridx)
2715 {
2716 uint64_t offset = 0;
2717
2718 switch (timeridx) {
2719 case GTIMER_VIRT:
2720 case GTIMER_HYPVIRT:
2721 offset = gt_virt_cnt_offset(env);
2722 break;
2723 }
2724
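/* TVAL is the low 32 bits of (CVAL - (count - offset)). */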
2725 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2726 (gt_get_countervalue(env) - offset));
2727 }
2728
2729 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2730 int timeridx,
2731 uint64_t value)
2732 {
2733 uint64_t offset = 0;
2734
2735 switch (timeridx) {
2736 case GTIMER_VIRT:
2737 case GTIMER_HYPVIRT:
2738 offset = gt_virt_cnt_offset(env);
2739 break;
2740 }
2741
2742 trace_arm_gt_tval_write(timeridx, value);
2743 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2744 sextract64(value, 0, 32);
2745 gt_recalc_timer(env_archcpu(env), timeridx);
2746 }
2747
2748 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2749 int timeridx,
2750 uint64_t value)
2751 {
2752 ARMCPU *cpu = env_archcpu(env);
2753 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2754
2755 trace_arm_gt_ctl_write(timeridx, value);
2756 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2757 if ((oldval ^ value) & 1) {
2758 /* Enable toggled */
2759 gt_recalc_timer(cpu, timeridx);
2760 } else if ((oldval ^ value) & 2) {
2761 /* IMASK toggled: don't need to recalculate,
2762 * just set the interrupt line based on ISTATUS
2763 */
2764 int irqstate = (oldval & 4) && !(value & 2);
2765
2766 trace_arm_gt_imask_toggle(timeridx, irqstate);
2767 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2768 }
2769 }
2770
2771 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2772 {
2773 gt_timer_reset(env, ri, GTIMER_PHYS);
2774 }
2775
2776 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2777 uint64_t value)
2778 {
2779 gt_cval_write(env, ri, GTIMER_PHYS, value);
2780 }
2781
2782 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2783 {
2784 return gt_tval_read(env, ri, GTIMER_PHYS);
2785 }
2786
2787 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2788 uint64_t value)
2789 {
2790 gt_tval_write(env, ri, GTIMER_PHYS, value);
2791 }
2792
2793 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2794 uint64_t value)
2795 {
2796 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2797 }
2798
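/*
 * With VHE, accesses to the EL0 physical and virtual timer registers made
 * from an EL2&0 translation regime are redirected to the EL2 timers
 * (CNTHP, CNTHV); these helpers pick the appropriate timer index.
 */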
2799 static int gt_phys_redir_timeridx(CPUARMState *env)
2800 {
2801 switch (arm_mmu_idx(env)) {
2802 case ARMMMUIdx_E20_0:
2803 case ARMMMUIdx_E20_2:
2804 case ARMMMUIdx_E20_2_PAN:
2805 return GTIMER_HYP;
2806 default:
2807 return GTIMER_PHYS;
2808 }
2809 }
2810
2811 static int gt_virt_redir_timeridx(CPUARMState *env)
2812 {
2813 switch (arm_mmu_idx(env)) {
2814 case ARMMMUIdx_E20_0:
2815 case ARMMMUIdx_E20_2:
2816 case ARMMMUIdx_E20_2_PAN:
2817 return GTIMER_HYPVIRT;
2818 default:
2819 return GTIMER_VIRT;
2820 }
2821 }
2822
2823 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2824 const ARMCPRegInfo *ri)
2825 {
2826 int timeridx = gt_phys_redir_timeridx(env);
2827 return env->cp15.c14_timer[timeridx].cval;
2828 }
2829
2830 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2831 uint64_t value)
2832 {
2833 int timeridx = gt_phys_redir_timeridx(env);
2834 gt_cval_write(env, ri, timeridx, value);
2835 }
2836
2837 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2838 const ARMCPRegInfo *ri)
2839 {
2840 int timeridx = gt_phys_redir_timeridx(env);
2841 return gt_tval_read(env, ri, timeridx);
2842 }
2843
2844 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2845 uint64_t value)
2846 {
2847 int timeridx = gt_phys_redir_timeridx(env);
2848 gt_tval_write(env, ri, timeridx, value);
2849 }
2850
2851 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2852 const ARMCPRegInfo *ri)
2853 {
2854 int timeridx = gt_phys_redir_timeridx(env);
2855 return env->cp15.c14_timer[timeridx].ctl;
2856 }
2857
2858 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2859 uint64_t value)
2860 {
2861 int timeridx = gt_phys_redir_timeridx(env);
2862 gt_ctl_write(env, ri, timeridx, value);
2863 }
2864
2865 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2866 {
2867 gt_timer_reset(env, ri, GTIMER_VIRT);
2868 }
2869
2870 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2871 uint64_t value)
2872 {
2873 gt_cval_write(env, ri, GTIMER_VIRT, value);
2874 }
2875
2876 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2877 {
2878 return gt_tval_read(env, ri, GTIMER_VIRT);
2879 }
2880
2881 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2882 uint64_t value)
2883 {
2884 gt_tval_write(env, ri, GTIMER_VIRT, value);
2885 }
2886
2887 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2888 uint64_t value)
2889 {
2890 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2891 }
2892
2893 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2894 uint64_t value)
2895 {
2896 ARMCPU *cpu = env_archcpu(env);
2897
2898 trace_arm_gt_cntvoff_write(value);
2899 raw_write(env, ri, value);
2900 gt_recalc_timer(cpu, GTIMER_VIRT);
2901 }
2902
2903 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2904 const ARMCPRegInfo *ri)
2905 {
2906 int timeridx = gt_virt_redir_timeridx(env);
2907 return env->cp15.c14_timer[timeridx].cval;
2908 }
2909
2910 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2911 uint64_t value)
2912 {
2913 int timeridx = gt_virt_redir_timeridx(env);
2914 gt_cval_write(env, ri, timeridx, value);
2915 }
2916
2917 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2918 const ARMCPRegInfo *ri)
2919 {
2920 int timeridx = gt_virt_redir_timeridx(env);
2921 return gt_tval_read(env, ri, timeridx);
2922 }
2923
2924 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2925 uint64_t value)
2926 {
2927 int timeridx = gt_virt_redir_timeridx(env);
2928 gt_tval_write(env, ri, timeridx, value);
2929 }
2930
2931 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2932 const ARMCPRegInfo *ri)
2933 {
2934 int timeridx = gt_virt_redir_timeridx(env);
2935 return env->cp15.c14_timer[timeridx].ctl;
2936 }
2937
2938 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2939 uint64_t value)
2940 {
2941 int timeridx = gt_virt_redir_timeridx(env);
2942 gt_ctl_write(env, ri, timeridx, value);
2943 }
2944
2945 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2946 {
2947 gt_timer_reset(env, ri, GTIMER_HYP);
2948 }
2949
2950 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2951 uint64_t value)
2952 {
2953 gt_cval_write(env, ri, GTIMER_HYP, value);
2954 }
2955
2956 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2957 {
2958 return gt_tval_read(env, ri, GTIMER_HYP);
2959 }
2960
2961 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2962 uint64_t value)
2963 {
2964 gt_tval_write(env, ri, GTIMER_HYP, value);
2965 }
2966
2967 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2968 uint64_t value)
2969 {
2970 gt_ctl_write(env, ri, GTIMER_HYP, value);
2971 }
2972
2973 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2974 {
2975 gt_timer_reset(env, ri, GTIMER_SEC);
2976 }
2977
2978 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2979 uint64_t value)
2980 {
2981 gt_cval_write(env, ri, GTIMER_SEC, value);
2982 }
2983
2984 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2985 {
2986 return gt_tval_read(env, ri, GTIMER_SEC);
2987 }
2988
2989 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2990 uint64_t value)
2991 {
2992 gt_tval_write(env, ri, GTIMER_SEC, value);
2993 }
2994
2995 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2996 uint64_t value)
2997 {
2998 gt_ctl_write(env, ri, GTIMER_SEC, value);
2999 }
3000
3001 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3002 {
3003 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3004 }
3005
3006 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3007 uint64_t value)
3008 {
3009 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3010 }
3011
3012 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3013 {
3014 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3015 }
3016
3017 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3018 uint64_t value)
3019 {
3020 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3021 }
3022
3023 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3024 uint64_t value)
3025 {
3026 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3027 }
3028
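/*
 * QEMUTimer expiry callbacks: recompute ISTATUS and the interrupt line,
 * and re-arm the timer for any remaining period.
 */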
3029 void arm_gt_ptimer_cb(void *opaque)
3030 {
3031 ARMCPU *cpu = opaque;
3032
3033 gt_recalc_timer(cpu, GTIMER_PHYS);
3034 }
3035
3036 void arm_gt_vtimer_cb(void *opaque)
3037 {
3038 ARMCPU *cpu = opaque;
3039
3040 gt_recalc_timer(cpu, GTIMER_VIRT);
3041 }
3042
3043 void arm_gt_htimer_cb(void *opaque)
3044 {
3045 ARMCPU *cpu = opaque;
3046
3047 gt_recalc_timer(cpu, GTIMER_HYP);
3048 }
3049
3050 void arm_gt_stimer_cb(void *opaque)
3051 {
3052 ARMCPU *cpu = opaque;
3053
3054 gt_recalc_timer(cpu, GTIMER_SEC);
3055 }
3056
3057 void arm_gt_hvtimer_cb(void *opaque)
3058 {
3059 ARMCPU *cpu = opaque;
3060
3061 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3062 }
3063
3064 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3065 {
3066 ARMCPU *cpu = env_archcpu(env);
3067
3068 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3069 }
3070
3071 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3072 /* Note that CNTFRQ is purely reads-as-written for the benefit
3073 * of software; writing it doesn't actually change the timer frequency.
3074 * Our reset value matches the fixed frequency we implement the timer at.
3075 */
3076 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3077 .type = ARM_CP_ALIAS,
3078 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3079 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3080 },
3081 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3082 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3083 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3084 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3085 .resetfn = arm_gt_cntfrq_reset,
3086 },
3087 /* overall control: mostly access permissions */
3088 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3089 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3090 .access = PL1_RW,
3091 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3092 .resetvalue = 0,
3093 },
3094 /* per-timer control */
3095 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3096 .secure = ARM_CP_SECSTATE_NS,
3097 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3098 .accessfn = gt_ptimer_access,
3099 .fieldoffset = offsetoflow32(CPUARMState,
3100 cp15.c14_timer[GTIMER_PHYS].ctl),
3101 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3102 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3103 },
3104 { .name = "CNTP_CTL_S",
3105 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3106 .secure = ARM_CP_SECSTATE_S,
3107 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3108 .accessfn = gt_ptimer_access,
3109 .fieldoffset = offsetoflow32(CPUARMState,
3110 cp15.c14_timer[GTIMER_SEC].ctl),
3111 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3112 },
3113 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3114 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3115 .type = ARM_CP_IO, .access = PL0_RW,
3116 .accessfn = gt_ptimer_access,
3117 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3118 .resetvalue = 0,
3119 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3120 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3121 },
3122 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3123 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3124 .accessfn = gt_vtimer_access,
3125 .fieldoffset = offsetoflow32(CPUARMState,
3126 cp15.c14_timer[GTIMER_VIRT].ctl),
3127 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3128 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3129 },
3130 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3131 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3132 .type = ARM_CP_IO, .access = PL0_RW,
3133 .accessfn = gt_vtimer_access,
3134 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3135 .resetvalue = 0,
3136 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3137 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3138 },
3139 /* TimerValue views: a 32 bit downcounting view of the underlying state */
3140 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3141 .secure = ARM_CP_SECSTATE_NS,
3142 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3143 .accessfn = gt_ptimer_access,
3144 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3145 },
3146 { .name = "CNTP_TVAL_S",
3147 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3148 .secure = ARM_CP_SECSTATE_S,
3149 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3150 .accessfn = gt_ptimer_access,
3151 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3152 },
3153 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3154 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3155 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3156 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3157 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3158 },
3159 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3160 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3161 .accessfn = gt_vtimer_access,
3162 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3163 },
3164 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3165 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3166 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3167 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3168 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3169 },
3170 /* The counter itself */
3171 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3172 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3173 .accessfn = gt_pct_access,
3174 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3175 },
3176 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3177 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3178 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3179 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3180 },
3181 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3182 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3183 .accessfn = gt_vct_access,
3184 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3185 },
3186 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3187 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3188 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3189 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3190 },
3191 /* Comparison value, indicating when the timer goes off */
3192 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3193 .secure = ARM_CP_SECSTATE_NS,
3194 .access = PL0_RW,
3195 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3196 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3197 .accessfn = gt_ptimer_access,
3198 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3199 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3200 },
3201 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3202 .secure = ARM_CP_SECSTATE_S,
3203 .access = PL0_RW,
3204 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3205 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3206 .accessfn = gt_ptimer_access,
3207 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3208 },
3209 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3210 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3211 .access = PL0_RW,
3212 .type = ARM_CP_IO,
3213 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3214 .resetvalue = 0, .accessfn = gt_ptimer_access,
3215 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3216 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3217 },
3218 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3219 .access = PL0_RW,
3220 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3221 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3222 .accessfn = gt_vtimer_access,
3223 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3224 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3225 },
3226 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3227 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3228 .access = PL0_RW,
3229 .type = ARM_CP_IO,
3230 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3231 .resetvalue = 0, .accessfn = gt_vtimer_access,
3232 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3233 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3234 },
3235 /* Secure timer -- this is actually restricted to only EL3
3236 * and configurably Secure-EL1 via the accessfn.
3237 */
3238 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3239 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3240 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3241 .accessfn = gt_stimer_access,
3242 .readfn = gt_sec_tval_read,
3243 .writefn = gt_sec_tval_write,
3244 .resetfn = gt_sec_timer_reset,
3245 },
3246 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3247 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3248 .type = ARM_CP_IO, .access = PL1_RW,
3249 .accessfn = gt_stimer_access,
3250 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3251 .resetvalue = 0,
3252 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3253 },
3254 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3255 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3256 .type = ARM_CP_IO, .access = PL1_RW,
3257 .accessfn = gt_stimer_access,
3258 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3259 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3260 },
3261 REGINFO_SENTINEL
3262 };
3263
3264 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3265 bool isread)
3266 {
3267 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3268 return CP_ACCESS_TRAP;
3269 }
3270 return CP_ACCESS_OK;
3271 }
3272
3273 #else
3274
3275 /* In user-mode most of the generic timer registers are inaccessible;
3276 * however, modern kernels (4.12+) allow access to cntvct_el0.
3277 */
3278
3279 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3280 {
3281 ARMCPU *cpu = env_archcpu(env);
3282
3283 /* Currently we have no support for QEMUTimer in linux-user so we
3284 * can't call gt_get_countervalue(env); instead we call the
3285 * lower-level functions directly.
3286 */
3287 return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3288 }
3289
3290 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3291 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3292 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3293 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3294 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3295 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3296 },
3297 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3298 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3299 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3300 .readfn = gt_virt_cnt_read,
3301 },
3302 REGINFO_SENTINEL
3303 };
3304
3305 #endif
3306
3307 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3308 {
3309 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3310 raw_write(env, ri, value);
3311 } else if (arm_feature(env, ARM_FEATURE_V7)) {
3312 raw_write(env, ri, value & 0xfffff6ff);
3313 } else {
3314 raw_write(env, ri, value & 0xfffff1ff);
3315 }
3316 }
3317
3318 #ifndef CONFIG_USER_ONLY
3319 /* get_phys_addr() isn't present for user-mode-only targets */
3320
3321 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3322 bool isread)
3323 {
3324 if (ri->opc2 & 4) {
3325 /* The ATS12NSO* operations must trap to EL3 if executed in
3326 * Secure EL1 (which can only happen if EL3 is AArch64).
3327 * They are simply UNDEF if executed from NS EL1.
3328 * They function normally from EL2 or EL3.
3329 */
3330 if (arm_current_el(env) == 1) {
3331 if (arm_is_secure_below_el3(env)) {
3332 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
3333 }
3334 return CP_ACCESS_TRAP_UNCATEGORIZED;
3335 }
3336 }
3337 return CP_ACCESS_OK;
3338 }
3339
3340 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3341 MMUAccessType access_type, ARMMMUIdx mmu_idx)
3342 {
3343 hwaddr phys_addr;
3344 target_ulong page_size;
3345 int prot;
3346 bool ret;
3347 uint64_t par64;
3348 bool format64 = false;
3349 MemTxAttrs attrs = {};
3350 ARMMMUFaultInfo fi = {};
3351 ARMCacheAttrs cacheattrs = {};
3352
3353 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
3354 &prot, &page_size, &fi, &cacheattrs);
3355
3356 if (ret) {
3357 /*
3358 * Some kinds of translation fault must cause exceptions rather
3359 * than being reported in the PAR.
3360 */
3361 int current_el = arm_current_el(env);
3362 int target_el;
3363 uint32_t syn, fsr, fsc;
3364 bool take_exc = false;
3365
3366 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
3367 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3368 /*
3369 * Synchronous stage 2 fault on an access made as part of the
3370 * translation table walk for AT S1E0* or AT S1E1* insn
3371 * executed from NS EL1. If this is a synchronous external abort
3372 * and SCR_EL3.EA == 1, then we take a synchronous external abort
3373 * to EL3. Otherwise the fault is taken as an exception to EL2,
3374 * and HPFAR_EL2 holds the faulting IPA.
3375 */
3376 if (fi.type == ARMFault_SyncExternalOnWalk &&
3377 (env->cp15.scr_el3 & SCR_EA)) {
3378 target_el = 3;
3379 } else {
3380 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3381 target_el = 2;
3382 }
3383 take_exc = true;
3384 } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3385 /*
3386 * Synchronous external aborts during a translation table walk
3387 * are taken as Data Abort exceptions.
3388 */
3389 if (fi.stage2) {
3390 if (current_el == 3) {
3391 target_el = 3;
3392 } else {
3393 target_el = 2;
3394 }
3395 } else {
3396 target_el = exception_target_el(env);
3397 }
3398 take_exc = true;
3399 }
3400
3401 if (take_exc) {
3402 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3403 if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3404 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3405 fsr = arm_fi_to_lfsc(&fi);
3406 fsc = extract32(fsr, 0, 6);
3407 } else {
3408 fsr = arm_fi_to_sfsc(&fi);
3409 fsc = 0x3f;
3410 }
3411 /*
3412 * Report exception with ESR indicating a fault due to a
3413 * translation table walk for a cache maintenance instruction.
3414 */
3415 syn = syn_data_abort_no_iss(current_el == target_el,
3416 fi.ea, 1, fi.s1ptw, 1, fsc);
3417 env->exception.vaddress = value;
3418 env->exception.fsr = fsr;
3419 raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3420 }
3421 }
3422
3423 if (is_a64(env)) {
3424 format64 = true;
3425 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3426 /*
3427 * ATS1Cxx:
3428 * * TTBCR.EAE determines whether the result is returned using the
3429 * 32-bit or the 64-bit PAR format
3430 * * Instructions executed in Hyp mode always use the 64bit format
3431 *
3432 * ATS1S2NSOxx uses the 64bit format if any of the following is true:
3433 * * The Non-secure TTBCR.EAE bit is set to 1
3434 * * The implementation includes EL2, and the value of HCR.VM is 1
3435 *
3436 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3437 *
3438 * ATS1Hx always uses the 64bit format.
3439 */