/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
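/*
 * Worked example (assuming the fixed 1 GHz value above): one emulated CPU
 * cycle corresponds to one nanosecond of QEMU_CLOCK_VIRTUAL time, so
 *   muldiv64(ns, ARM_CPU_FREQ, NANOSECONDS_PER_SECOND) == ns
 * which is how cycles_get_count() below turns the virtual clock into a
 * PMU cycle count.
 */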
#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
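/*
 * Minimal sketch (hypothetical register entry, not part of this file): a
 * definition that only sets .fieldoffset and has no readfn/writefn is
 * handled entirely by the raw accessors above when it is migrated:
 *
 *   static const ARMCPRegInfo example_ri = {
 *       .name = "EXAMPLE", .cp = 15, .crn = 13, .crm = 0,
 *       .opc1 = 0, .opc2 = 7, .access = PL1_RW,
 *       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
 *   };
 *
 * read_raw_cp_reg()/write_raw_cp_reg() fall through to raw_read()/raw_write()
 * because neither raw_readfn/raw_writefn nor readfn/writefn are set.
 */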
/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg32(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}
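/*
 * Worked example for the VG pseudo-register above (values chosen for
 * illustration): with a 512-bit SVE vector length, sve_zcr_len_for_el()
 * returns 3, so vq = 4 (four 128-bit quads) and gdb is told vq * 2 = 8
 * vector granules of 64 bits each.
 */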
static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    /* The first 32 registers are the zregs */
    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int vq, len = 0;
        int preg = reg - 34;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
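/*
 * Usage sketch (summarising how the migration code elsewhere in QEMU uses
 * the two helpers above; described here only for context): the pre-save
 * hook calls write_cpustate_to_list() to refresh cpreg_values[] from the
 * live registers before the list is sent, and the post-load hook calls
 * write_list_to_cpustate() to push the received (index, value) pairs back
 * into cpu->env, relying on the read-back check above to reject values
 * that do not stick.
 */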
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
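/*
 * Summary of the flow above (descriptive only): count_cpreg() is run over
 * every hash key to size the arrays, the uint64_t index/value arrays are
 * then allocated with g_new(), and add_cpreg_to_list() refills
 * cpreg_indexes[] in sorted key order while recounting cpreg_array_len;
 * the final assert checks that both passes saw the same number of
 * registers.
 */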
/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
}
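/*
 * Illustrative effect: when tlb_force_broadcast() returns true, a TLBIALL
 * write handled by tlbiall_write() below behaves like the Inner Shareable
 * variant, i.e. the flush is broadcast to all CPUs via
 * tlb_flush_all_cpus_synced() instead of only flushing the local TLB.
 */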
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
853 static const ARMCPRegInfo cp_reginfo
[] = {
854 /* Define the secure and non-secure FCSE identifier CP registers
855 * separately because there is no secure bank in V8 (no _EL3). This allows
856 * the secure register to be properly reset and migrated. There is also no
857 * v8 EL1 version of the register so the non-secure instance stands alone.
860 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
861 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
862 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_ns
),
863 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
864 { .name
= "FCSEIDR_S",
865 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
866 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
867 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_s
),
868 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
869 /* Define the secure and non-secure context identifier CP registers
870 * separately because there is no secure bank in V8 (no _EL3). This allows
871 * the secure register to be properly reset and migrated. In the
872 * non-secure case, the 32-bit register will have reset and migration
873 * disabled during registration as it is handled by the 64-bit instance.
875 { .name
= "CONTEXTIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
876 .opc0
= 3, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
877 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
878 .secure
= ARM_CP_SECSTATE_NS
,
879 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_el
[1]),
880 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
881 { .name
= "CONTEXTIDR_S", .state
= ARM_CP_STATE_AA32
,
882 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
883 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
884 .secure
= ARM_CP_SECSTATE_S
,
885 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_s
),
886 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
890 static const ARMCPRegInfo not_v8_cp_reginfo
[] = {
891 /* NB: Some of these registers exist in v8 but with more precise
892 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
894 /* MMU Domain access control / MPU write buffer control */
896 .cp
= 15, .opc1
= CP_ANY
, .crn
= 3, .crm
= CP_ANY
, .opc2
= CP_ANY
,
897 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
, .resetvalue
= 0,
898 .writefn
= dacr_write
, .raw_writefn
= raw_write
,
899 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dacr_s
),
900 offsetoflow32(CPUARMState
, cp15
.dacr_ns
) } },
901 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
902 * For v6 and v5, these mappings are overly broad.
904 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 0,
905 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
906 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 1,
907 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
908 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 4,
909 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
910 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 8,
911 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
912 /* Cache maintenance ops; some of this space may be overridden later. */
913 { .name
= "CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
914 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
915 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
},
919 static const ARMCPRegInfo not_v6_cp_reginfo
[] = {
920 /* Not all pre-v6 cores implemented this WFI, so this is slightly
923 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
924 .access
= PL1_W
, .type
= ARM_CP_WFI
},
928 static const ARMCPRegInfo not_v7_cp_reginfo
[] = {
929 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
930 * is UNPREDICTABLE; we choose to NOP as most implementations do).
932 { .name
= "WFI_v6", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
933 .access
= PL1_W
, .type
= ARM_CP_WFI
},
934 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
935 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
936 * OMAPCP will override this space.
938 { .name
= "DLOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 0,
939 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_data
),
941 { .name
= "ILOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 1,
942 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_insn
),
944 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
945 { .name
= "DUMMY", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= CP_ANY
,
946 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
948 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
949 * implementing it as RAZ means the "debug architecture version" bits
950 * will read as a reserved value, which should cause Linux to not try
951 * to use the debug hardware.
953 { .name
= "DBGDIDR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 0,
954 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
955 /* MMU TLB control. Note that the wildcarding means we cover not just
956 * the unified TLB ops but also the dside/iside/inner-shareable variants.
958 { .name
= "TLBIALL", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
959 .opc1
= CP_ANY
, .opc2
= 0, .access
= PL1_W
, .writefn
= tlbiall_write
,
960 .type
= ARM_CP_NO_RAW
},
961 { .name
= "TLBIMVA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
962 .opc1
= CP_ANY
, .opc2
= 1, .access
= PL1_W
, .writefn
= tlbimva_write
,
963 .type
= ARM_CP_NO_RAW
},
964 { .name
= "TLBIASID", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
965 .opc1
= CP_ANY
, .opc2
= 2, .access
= PL1_W
, .writefn
= tlbiasid_write
,
966 .type
= ARM_CP_NO_RAW
},
967 { .name
= "TLBIMVAA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
968 .opc1
= CP_ANY
, .opc2
= 3, .access
= PL1_W
, .writefn
= tlbimvaa_write
,
969 .type
= ARM_CP_NO_RAW
},
970 { .name
= "PRRR", .cp
= 15, .crn
= 10, .crm
= 2,
971 .opc1
= 0, .opc2
= 0, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
972 { .name
= "NMRR", .cp
= 15, .crn
= 10, .crm
= 2,
973 .opc1
= 0, .opc2
= 1, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
977 static void cpacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
982 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
983 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
984 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
985 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
986 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
988 if (cpu_isar_feature(aa32_vfp_simd
, env_archcpu(env
))) {
989 /* VFP coprocessor: cp10 & cp11 [23:20] */
990 mask
|= (1 << 31) | (1 << 30) | (0xf << 20);
992 if (!arm_feature(env
, ARM_FEATURE_NEON
)) {
993 /* ASEDIS [31] bit is RAO/WI */
997 /* VFPv3 and upwards with NEON implement 32 double precision
998 * registers (D0-D31).
1000 if (!cpu_isar_feature(aa32_simd_r32
, env_archcpu(env
))) {
1001 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
1009 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1010 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1012 if (arm_feature(env
, ARM_FEATURE_EL3
) && !arm_el_is_aa64(env
, 3) &&
1013 !arm_is_secure(env
) && !extract32(env
->cp15
.nsacr
, 10, 1)) {
1014 value
&= ~(0xf << 20);
1015 value
|= env
->cp15
.cpacr_el1
& (0xf << 20);
1018 env
->cp15
.cpacr_el1
= value
;
1021 static uint64_t cpacr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1024 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1025 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1027 uint64_t value
= env
->cp15
.cpacr_el1
;
1029 if (arm_feature(env
, ARM_FEATURE_EL3
) && !arm_el_is_aa64(env
, 3) &&
1030 !arm_is_secure(env
) && !extract32(env
->cp15
.nsacr
, 10, 1)) {
1031 value
&= ~(0xf << 20);
1037 static void cpacr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1039 /* Call cpacr_write() so that we reset with the correct RAO bits set
1040 * for our CPU features.
1042 cpacr_write(env
, ri
, 0);
1045 static CPAccessResult
cpacr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1048 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1049 /* Check if CPACR accesses are to be trapped to EL2 */
1050 if (arm_current_el(env
) == 1 &&
1051 (env
->cp15
.cptr_el
[2] & CPTR_TCPAC
) && !arm_is_secure(env
)) {
1052 return CP_ACCESS_TRAP_EL2
;
1053 /* Check if CPACR accesses are to be trapped to EL3 */
1054 } else if (arm_current_el(env
) < 3 &&
1055 (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
1056 return CP_ACCESS_TRAP_EL3
;
1060 return CP_ACCESS_OK
;
1063 static CPAccessResult
cptr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1066 /* Check if CPTR accesses are set to trap to EL3 */
1067 if (arm_current_el(env
) == 2 && (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
1068 return CP_ACCESS_TRAP_EL3
;
1071 return CP_ACCESS_OK
;
1074 static const ARMCPRegInfo v6_cp_reginfo
[] = {
1075 /* prefetch by MVA in v6, NOP in v7 */
1076 { .name
= "MVA_prefetch",
1077 .cp
= 15, .crn
= 7, .crm
= 13, .opc1
= 0, .opc2
= 1,
1078 .access
= PL1_W
, .type
= ARM_CP_NOP
},
1079 /* We need to break the TB after ISB to execute self-modifying code
1080 * correctly and also to take any pending interrupts immediately.
1081 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
1083 { .name
= "ISB", .cp
= 15, .crn
= 7, .crm
= 5, .opc1
= 0, .opc2
= 4,
1084 .access
= PL0_W
, .type
= ARM_CP_NO_RAW
, .writefn
= arm_cp_write_ignore
},
1085 { .name
= "DSB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 4,
1086 .access
= PL0_W
, .type
= ARM_CP_NOP
},
1087 { .name
= "DMB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 5,
1088 .access
= PL0_W
, .type
= ARM_CP_NOP
},
1089 { .name
= "IFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 2,
1090 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
1091 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ifar_s
),
1092 offsetof(CPUARMState
, cp15
.ifar_ns
) },
1094 /* Watchpoint Fault Address Register : should actually only be present
1095 * for 1136, 1176, 11MPCore.
1097 { .name
= "WFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 1,
1098 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0, },
1099 { .name
= "CPACR", .state
= ARM_CP_STATE_BOTH
, .opc0
= 3,
1100 .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 2, .accessfn
= cpacr_access
,
1101 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.cpacr_el1
),
1102 .resetfn
= cpacr_reset
, .writefn
= cpacr_write
, .readfn
= cpacr_read
},
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
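/*
 * Worked example (values chosen for illustration): if PMCR.N is 4, i.e.
 * (c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT == 4, then pmu_counter_mask()
 * returns (1 << 31) | ((1 << 4) - 1) == 0x8000000f: bit 31 selects the
 * cycle counter and bits [3:0] select the four event counters.
 */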
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    /* There is no overflow limit for SW_INCR */
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
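/*
 * Worked example of the PMCEID mapping above (illustrative): event 0x011
 * (CPU_CYCLES) has bit 5 (0x20) clear, so it sets bit 17 of pmceid0;
 * event 0x023 (STALL_FRONTEND) has bit 5 set, so it sets bit 3 of pmceid1
 * (0x23 & 0x1f == 3), provided the corresponding ->supported() hook
 * returned true.
 */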
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
1341 static CPAccessResult
pmreg_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1344 /* Performance monitor registers user accessibility is controlled
1345 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1346 * trapping to EL2 or EL3 for other accesses.
1348 int el
= arm_current_el(env
);
1350 if (el
== 0 && !(env
->cp15
.c9_pmuserenr
& 1)) {
1351 return CP_ACCESS_TRAP
;
1353 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
1354 && !arm_is_secure_below_el3(env
)) {
1355 return CP_ACCESS_TRAP_EL2
;
1357 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
1358 return CP_ACCESS_TRAP_EL3
;
1361 return CP_ACCESS_OK
;
1364 static CPAccessResult
pmreg_access_xevcntr(CPUARMState
*env
,
1365 const ARMCPRegInfo
*ri
,
1368 /* ER: event counter read trap control */
1369 if (arm_feature(env
, ARM_FEATURE_V8
)
1370 && arm_current_el(env
) == 0
1371 && (env
->cp15
.c9_pmuserenr
& (1 << 3)) != 0
1373 return CP_ACCESS_OK
;
1376 return pmreg_access(env
, ri
, isread
);
1379 static CPAccessResult
pmreg_access_swinc(CPUARMState
*env
,
1380 const ARMCPRegInfo
*ri
,
1383 /* SW: software increment write trap control */
1384 if (arm_feature(env
, ARM_FEATURE_V8
)
1385 && arm_current_el(env
) == 0
1386 && (env
->cp15
.c9_pmuserenr
& (1 << 1)) != 0
1388 return CP_ACCESS_OK
;
1391 return pmreg_access(env
, ri
, isread
);
1394 static CPAccessResult
pmreg_access_selr(CPUARMState
*env
,
1395 const ARMCPRegInfo
*ri
,
1398 /* ER: event counter read trap control */
1399 if (arm_feature(env
, ARM_FEATURE_V8
)
1400 && arm_current_el(env
) == 0
1401 && (env
->cp15
.c9_pmuserenr
& (1 << 3)) != 0) {
1402 return CP_ACCESS_OK
;
1405 return pmreg_access(env
, ri
, isread
);
1408 static CPAccessResult
pmreg_access_ccntr(CPUARMState
*env
,
1409 const ARMCPRegInfo
*ri
,
1412 /* CR: cycle counter read trap control */
1413 if (arm_feature(env
, ARM_FEATURE_V8
)
1414 && arm_current_el(env
) == 0
1415 && (env
->cp15
.c9_pmuserenr
& (1 << 2)) != 0
1417 return CP_ACCESS_OK
;
1420 return pmreg_access(env
, ri
, isread
);
1423 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1424 * the current EL, security state, and register configuration.
1426 static bool pmu_counter_enabled(CPUARMState
*env
, uint8_t counter
)
1429 bool e
, p
, u
, nsk
, nsu
, nsh
, m
;
1430 bool enabled
, prohibited
, filtered
;
1431 bool secure
= arm_is_secure(env
);
1432 int el
= arm_current_el(env
);
1433 uint8_t hpmn
= env
->cp15
.mdcr_el2
& MDCR_HPMN
;
1435 if (!arm_feature(env
, ARM_FEATURE_PMU
)) {
1439 if (!arm_feature(env
, ARM_FEATURE_EL2
) ||
1440 (counter
< hpmn
|| counter
== 31)) {
1441 e
= env
->cp15
.c9_pmcr
& PMCRE
;
1443 e
= env
->cp15
.mdcr_el2
& MDCR_HPME
;
1445 enabled
= e
&& (env
->cp15
.c9_pmcnten
& (1 << counter
));
1448 if (el
== 2 && (counter
< hpmn
|| counter
== 31)) {
1449 prohibited
= env
->cp15
.mdcr_el2
& MDCR_HPMD
;
1454 prohibited
= arm_feature(env
, ARM_FEATURE_EL3
) &&
1455 (env
->cp15
.mdcr_el3
& MDCR_SPME
);
1458 if (prohibited
&& counter
== 31) {
1459 prohibited
= env
->cp15
.c9_pmcr
& PMCRDP
;
1462 if (counter
== 31) {
1463 filter
= env
->cp15
.pmccfiltr_el0
;
1465 filter
= env
->cp15
.c14_pmevtyper
[counter
];
1468 p
= filter
& PMXEVTYPER_P
;
1469 u
= filter
& PMXEVTYPER_U
;
1470 nsk
= arm_feature(env
, ARM_FEATURE_EL3
) && (filter
& PMXEVTYPER_NSK
);
1471 nsu
= arm_feature(env
, ARM_FEATURE_EL3
) && (filter
& PMXEVTYPER_NSU
);
1472 nsh
= arm_feature(env
, ARM_FEATURE_EL2
) && (filter
& PMXEVTYPER_NSH
);
1473 m
= arm_el_is_aa64(env
, 1) &&
1474 arm_feature(env
, ARM_FEATURE_EL3
) && (filter
& PMXEVTYPER_M
);
1477 filtered
= secure ? u
: u
!= nsu
;
1478 } else if (el
== 1) {
1479 filtered
= secure ? p
: p
!= nsk
;
1480 } else if (el
== 2) {
1486 if (counter
!= 31) {
1488 * If not checking PMCCNTR, ensure the counter is setup to an event we
1491 uint16_t event
= filter
& PMXEVTYPER_EVTCOUNT
;
1492 if (!event_supported(event
)) {
1497 return enabled
&& !prohibited
&& !filtered
;
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
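/*
 * Delta scheme used above, by example (numbers illustrative): if the
 * counter is enabled with c15_ccnt_delta == 1000 and cycles_get_count()
 * now returns 1250, the guest-visible PMCCNTR becomes 1250 - 1000 = 250.
 * pmccntr_op_finish() below later recomputes the delta so that the stored
 * guest value stays constant while the counter is being reprogrammed.
 */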
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}
1681 static void pmswinc_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1685 for (i
= 0; i
< pmu_num_counters(env
); i
++) {
1686 /* Increment a counter's count iff: */
1687 if ((value
& (1 << i
)) && /* counter's bit is set */
1688 /* counter is enabled and not filtered */
1689 pmu_counter_enabled(env
, i
) &&
1690 /* counter is SW_INCR */
1691 (env
->cp15
.c14_pmevtyper
[i
] & PMXEVTYPER_EVTCOUNT
) == 0x0) {
1692 pmevcntr_op_start(env
, i
);
1695 * Detect if this write causes an overflow since we can't predict
1696 * PMSWINC overflows like we can for other events
1698 uint32_t new_pmswinc
= env
->cp15
.c14_pmevcntr
[i
] + 1;
1700 if (env
->cp15
.c14_pmevcntr
[i
] & ~new_pmswinc
& INT32_MIN
) {
1701 env
->cp15
.c9_pmovsr
|= (1 << i
);
1702 pmu_update_irq(env
);
1705 env
->cp15
.c14_pmevcntr
[i
] = new_pmswinc
;
1707 pmevcntr_op_finish(env
, i
);
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
1721 static void pmselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1724 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1725 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1726 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1729 env
->cp15
.c9_pmselr
= value
& 0x1f;
1732 static void pmccntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1735 pmccntr_op_start(env
);
1736 env
->cp15
.c15_ccnt
= value
;
1737 pmccntr_op_finish(env
);
1740 static void pmccntr_write32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1743 uint64_t cur_val
= pmccntr_read(env
, NULL
);
1745 pmccntr_write(env
, ri
, deposit64(cur_val
, 0, 32, value
));
1748 static void pmccfiltr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1751 pmccntr_op_start(env
);
1752 env
->cp15
.pmccfiltr_el0
= value
& PMCCFILTR_EL0
;
1753 pmccntr_op_finish(env
);
1756 static void pmccfiltr_write_a32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1759 pmccntr_op_start(env
);
1760 /* M is not accessible from AArch32 */
1761 env
->cp15
.pmccfiltr_el0
= (env
->cp15
.pmccfiltr_el0
& PMCCFILTR_M
) |
1762 (value
& PMCCFILTR
);
1763 pmccntr_op_finish(env
);
1766 static uint64_t pmccfiltr_read_a32(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1768 /* M is not visible in AArch32 */
1769 return env
->cp15
.pmccfiltr_el0
& PMCCFILTR
;
1772 static void pmcntenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1775 value
&= pmu_counter_mask(env
);
1776 env
->cp15
.c9_pmcnten
|= value
;
1779 static void pmcntenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1782 value
&= pmu_counter_mask(env
);
1783 env
->cp15
.c9_pmcnten
&= ~value
;
1786 static void pmovsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1789 value
&= pmu_counter_mask(env
);
1790 env
->cp15
.c9_pmovsr
&= ~value
;
1791 pmu_update_irq(env
);
1794 static void pmovsset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1797 value
&= pmu_counter_mask(env
);
1798 env
->cp15
.c9_pmovsr
|= value
;
1799 pmu_update_irq(env
);
1802 static void pmevtyper_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1803 uint64_t value
, const uint8_t counter
)
1805 if (counter
== 31) {
1806 pmccfiltr_write(env
, ri
, value
);
1807 } else if (counter
< pmu_num_counters(env
)) {
1808 pmevcntr_op_start(env
, counter
);
1811 * If this counter's event type is changing, store the current
1812 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1813 * pmevcntr_op_finish has the correct baseline when it converts back to
1816 uint16_t old_event
= env
->cp15
.c14_pmevtyper
[counter
] &
1817 PMXEVTYPER_EVTCOUNT
;
1818 uint16_t new_event
= value
& PMXEVTYPER_EVTCOUNT
;
1819 if (old_event
!= new_event
) {
1821 if (event_supported(new_event
)) {
1822 uint16_t event_idx
= supported_event_map
[new_event
];
1823 count
= pm_events
[event_idx
].get_count(env
);
1825 env
->cp15
.c14_pmevcntr_delta
[counter
] = count
;
1828 env
->cp15
.c14_pmevtyper
[counter
] = value
& PMXEVTYPER_MASK
;
1829 pmevcntr_op_finish(env
, counter
);
1831 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1832 * PMSELR value is equal to or greater than the number of implemented
1833 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1837 static uint64_t pmevtyper_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1838 const uint8_t counter
)
1840 if (counter
== 31) {
1841 return env
->cp15
.pmccfiltr_el0
;
1842 } else if (counter
< pmu_num_counters(env
)) {
1843 return env
->cp15
.c14_pmevtyper
[counter
];
1846 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1847 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1853 static void pmevtyper_writefn(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1856 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1857 pmevtyper_write(env
, ri
, value
, counter
);
1860 static void pmevtyper_rawwrite(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1863 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1864 env
->cp15
.c14_pmevtyper
[counter
] = value
;
1867 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1868 * pmu_op_finish calls when loading saved state for a migration. Because
1869 * we're potentially updating the type of event here, the value written to
1870 * c14_pmevcntr_delta by the preceeding pmu_op_start call may be for a
1871 * different counter type. Therefore, we need to set this value to the
1872 * current count for the counter type we're writing so that pmu_op_finish
1873 * has the correct count for its calculation.
1875 uint16_t event
= value
& PMXEVTYPER_EVTCOUNT
;
1876 if (event_supported(event
)) {
1877 uint16_t event_idx
= supported_event_map
[event
];
1878 env
->cp15
.c14_pmevcntr_delta
[counter
] =
1879 pm_events
[event_idx
].get_count(env
);
1883 static uint64_t pmevtyper_readfn(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1885 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1886 return pmevtyper_read(env
, ri
, counter
);
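/*
 * Counter index decoding used by the *_writefn/*_readfn helpers above, by
 * example (illustrative): the PMEVTYPER<n>_EL0 encodings pack n into
 * crm[1:0]:opc2[2:0], so an access with crm == 13 (0b1101) and opc2 == 2
 * yields ((13 & 3) << 3) | (2 & 7) == (1 << 3) | 2 == counter 10.
 */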
1889 static void pmxevtyper_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1892 pmevtyper_write(env
, ri
, value
, env
->cp15
.c9_pmselr
& 31);
1895 static uint64_t pmxevtyper_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1897 return pmevtyper_read(env
, ri
, env
->cp15
.c9_pmselr
& 31);
1900 static void pmevcntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1901 uint64_t value
, uint8_t counter
)
1903 if (counter
< pmu_num_counters(env
)) {
1904 pmevcntr_op_start(env
, counter
);
1905 env
->cp15
.c14_pmevcntr
[counter
] = value
;
1906 pmevcntr_op_finish(env
, counter
);
1909 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1910 * are CONSTRAINED UNPREDICTABLE.
1914 static uint64_t pmevcntr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1917 if (counter
< pmu_num_counters(env
)) {
1919 pmevcntr_op_start(env
, counter
);
1920 ret
= env
->cp15
.c14_pmevcntr
[counter
];
1921 pmevcntr_op_finish(env
, counter
);
1924 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1925 * are CONSTRAINED UNPREDICTABLE. */
1930 static void pmevcntr_writefn(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1933 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1934 pmevcntr_write(env
, ri
, value
, counter
);
1937 static uint64_t pmevcntr_readfn(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1939 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1940 return pmevcntr_read(env
, ri
, counter
);
1943 static void pmevcntr_rawwrite(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1946 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1947 assert(counter
< pmu_num_counters(env
));
1948 env
->cp15
.c14_pmevcntr
[counter
] = value
;
1949 pmevcntr_write(env
, ri
, value
, counter
);
1952 static uint64_t pmevcntr_rawread(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1954 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1955 assert(counter
< pmu_num_counters(env
));
1956 return env
->cp15
.c14_pmevcntr
[counter
];
1959 static void pmxevcntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1962 pmevcntr_write(env
, ri
, value
, env
->cp15
.c9_pmselr
& 31);
1965 static uint64_t pmxevcntr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1967 return pmevcntr_read(env
, ri
, env
->cp15
.c9_pmselr
& 31);
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
2052 static CPAccessResult
access_aa64_tid2(CPUARMState
*env
,
2053 const ARMCPRegInfo
*ri
,
2056 if (arm_current_el(env
) == 1 && (arm_hcr_el2_eff(env
) & HCR_TID2
)) {
2057 return CP_ACCESS_TRAP_EL2
;
2060 return CP_ACCESS_OK
;
2063 static uint64_t ccsidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2065 ARMCPU
*cpu
= env_archcpu(env
);
2067 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
2070 uint32_t index
= A32_BANKED_REG_GET(env
, csselr
,
2071 ri
->secure
& ARM_CP_SECSTATE_S
);
2073 return cpu
->ccsidr
[index
];
2076 static void csselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2079 raw_write(env
, ri
, value
& 0xf);
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;
    bool allow_virt = (arm_current_el(env) == 1 &&
                       (!arm_is_secure_below_el3(env) ||
                        (env->cp15.scr_el3 & SCR_EEL2)));

    if (allow_virt && (hcr_el2 & HCR_IMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (allow_virt && (hcr_el2 & HCR_FMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0 },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0 },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};

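/* ThumbEE support: TEECR bit 0 (XED) traps EL0 accesses to TEEHBR. */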
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    raw_write(env, ri, value);
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (arm_feature(env, ARM_FEATURE_EL2) &&
                timeridx == GTIMER_PHYS && !secure &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            timeridx == GTIMER_PHYS && !secure) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

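/* Recompute ISTATUS, the timer interrupt line and the QEMU timer deadline
 * for one generic timer.
 */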
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

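/* The virtual counter is offset from the physical one by CNTVOFF_EL2,
 * except for accesses made via the EL2&0 (HCR_EL2.E2H) regime, which see
 * a zero offset.
 */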
static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}

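/* Accessors binding the generic cval/tval/ctl helpers to the physical timer. */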
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

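/* When HCR_EL2.E2H is set, the EL0 timer register names are redirected to
 * the EL2 timers for code running in the EL2&0 regime; pick the backing
 * timer from the current mmu_idx.
 */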
static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}

static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

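/* Accessors for the virtual timer registers that honour the E2H
 * redirection described above.
 */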
static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}

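/* QEMUTimer expiry callbacks: re-evaluate the corresponding generic timer. */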
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,