1 #include "qemu/osdep.h"
2 #include "target/arm/idau.h"
6 #include "exec/gdbstub.h"
7 #include "exec/helper-proto.h"
8 #include "qemu/host-utils.h"
9 #include "sysemu/arch_init.h"
10 #include "sysemu/sysemu.h"
11 #include "qemu/bitops.h"
12 #include "qemu/crc32c.h"
13 #include "exec/exec-all.h"
14 #include "exec/cpu_ldst.h"
16 #include <zlib.h> /* For crc32 */
17 #include "exec/semihost.h"
18 #include "sysemu/kvm.h"
19 #include "fpu/softfloat.h"
21 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
23 #ifndef CONFIG_USER_ONLY
24 /* Cacheability and shareability attributes for a memory access */
25 typedef struct ARMCacheAttrs
{
26 unsigned int attrs
:8; /* as in the MAIR register encoding */
27 unsigned int shareability
:2; /* as in the SH field of the VMSAv8-64 PTEs */
30 static bool get_phys_addr(CPUARMState
*env
, target_ulong address
,
31 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
32 hwaddr
*phys_ptr
, MemTxAttrs
*attrs
, int *prot
,
33 target_ulong
*page_size
,
34 ARMMMUFaultInfo
*fi
, ARMCacheAttrs
*cacheattrs
);
36 static bool get_phys_addr_lpae(CPUARMState
*env
, target_ulong address
,
37 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
38 hwaddr
*phys_ptr
, MemTxAttrs
*txattrs
, int *prot
,
39 target_ulong
*page_size_ptr
,
40 ARMMMUFaultInfo
*fi
, ARMCacheAttrs
*cacheattrs
);
42 /* Security attributes for an address, as returned by v8m_security_lookup. */
43 typedef struct V8M_SAttributes
{
44 bool subpage
; /* true if these attrs don't cover the whole TARGET_PAGE */
53 static void v8m_security_lookup(CPUARMState
*env
, uint32_t address
,
54 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
55 V8M_SAttributes
*sattrs
);
58 static int vfp_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
62 /* VFP data registers are always little-endian. */
63 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ?
32 : 16;
65 stq_le_p(buf
, *aa32_vfp_dreg(env
, reg
));
68 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
69 /* Aliases for Q regs. */
72 uint64_t *q
= aa32_vfp_qreg(env
, reg
- 32);
74 stq_le_p(buf
+ 8, q
[1]);
78 switch (reg
- nregs
) {
79 case 0: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSID
]); return 4;
80 case 1: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSCR
]); return 4;
81 case 2: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPEXC
]); return 4;
86 static int vfp_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
90 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ?
32 : 16;
92 *aa32_vfp_dreg(env
, reg
) = ldq_le_p(buf
);
95 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
98 uint64_t *q
= aa32_vfp_qreg(env
, reg
- 32);
100 q
[1] = ldq_le_p(buf
+ 8);
104 switch (reg
- nregs
) {
105 case 0: env
->vfp
.xregs
[ARM_VFP_FPSID
] = ldl_p(buf
); return 4;
106 case 1: env
->vfp
.xregs
[ARM_VFP_FPSCR
] = ldl_p(buf
); return 4;
107 case 2: env
->vfp
.xregs
[ARM_VFP_FPEXC
] = ldl_p(buf
) & (1 << 30); return 4;
112 static int aarch64_fpu_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
116 /* 128 bit FP register */
118 uint64_t *q
= aa64_vfp_qreg(env
, reg
);
120 stq_le_p(buf
+ 8, q
[1]);
125 stl_p(buf
, vfp_get_fpsr(env
));
129 stl_p(buf
, vfp_get_fpcr(env
));
136 static int aarch64_fpu_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
140 /* 128 bit FP register */
142 uint64_t *q
= aa64_vfp_qreg(env
, reg
);
143 q
[0] = ldq_le_p(buf
);
144 q
[1] = ldq_le_p(buf
+ 8);
149 vfp_set_fpsr(env
, ldl_p(buf
));
153 vfp_set_fpcr(env
, ldl_p(buf
));
160 static uint64_t raw_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
162 assert(ri
->fieldoffset
);
163 if (cpreg_field_is_64bit(ri
)) {
164 return CPREG_FIELD64(env
, ri
);
166 return CPREG_FIELD32(env
, ri
);
170 static void raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
173 assert(ri
->fieldoffset
);
174 if (cpreg_field_is_64bit(ri
)) {
175 CPREG_FIELD64(env
, ri
) = value
;
177 CPREG_FIELD32(env
, ri
) = value
;
181 static void *raw_ptr(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
183 return (char *)env
+ ri
->fieldoffset
;
186 uint64_t read_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
188 /* Raw read of a coprocessor register (as needed for migration, etc). */
189 if (ri
->type
& ARM_CP_CONST
) {
190 return ri
->resetvalue
;
191 } else if (ri
->raw_readfn
) {
192 return ri
->raw_readfn(env
, ri
);
193 } else if (ri
->readfn
) {
194 return ri
->readfn(env
, ri
);
196 return raw_read(env
, ri
);
200 static void write_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
203 /* Raw write of a coprocessor register (as needed for migration, etc).
204 * Note that constant registers are treated as write-ignored; the
205 * caller should check for success by whether a readback gives the
208 if (ri
->type
& ARM_CP_CONST
) {
210 } else if (ri
->raw_writefn
) {
211 ri
->raw_writefn(env
, ri
, v
);
212 } else if (ri
->writefn
) {
213 ri
->writefn(env
, ri
, v
);
215 raw_write(env
, ri
, v
);
219 static int arm_gdb_get_sysreg(CPUARMState
*env
, uint8_t *buf
, int reg
)
221 ARMCPU
*cpu
= arm_env_get_cpu(env
);
222 const ARMCPRegInfo
*ri
;
225 key
= cpu
->dyn_xml
.cpregs_keys
[reg
];
226 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, key
);
228 if (cpreg_field_is_64bit(ri
)) {
229 return gdb_get_reg64(buf
, (uint64_t)read_raw_cp_reg(env
, ri
));
231 return gdb_get_reg32(buf
, (uint32_t)read_raw_cp_reg(env
, ri
));
237 static int arm_gdb_set_sysreg(CPUARMState
*env
, uint8_t *buf
, int reg
)
242 static bool raw_accessors_invalid(const ARMCPRegInfo
*ri
)
244 /* Return true if the regdef would cause an assertion if you called
245 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
246 * program bug for it not to have the NO_RAW flag).
247 * NB that returning false here doesn't necessarily mean that calling
248 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
249 * read/write access functions which are safe for raw use" from "has
250 * read/write access functions which have side effects but has forgotten
251 * to provide raw access functions".
252 * The tests here line up with the conditions in read/write_raw_cp_reg()
253 * and assertions in raw_read()/raw_write().
255 if ((ri
->type
& ARM_CP_CONST
) ||
257 ((ri
->raw_writefn
|| ri
->writefn
) && (ri
->raw_readfn
|| ri
->readfn
))) {
263 bool write_cpustate_to_list(ARMCPU
*cpu
)
265 /* Write the coprocessor state from cpu->env to the (index,value) list. */
269 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
270 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
271 const ARMCPRegInfo
*ri
;
273 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
278 if (ri
->type
& ARM_CP_NO_RAW
) {
281 cpu
->cpreg_values
[i
] = read_raw_cp_reg(&cpu
->env
, ri
);
286 bool write_list_to_cpustate(ARMCPU
*cpu
)
291 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
292 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
293 uint64_t v
= cpu
->cpreg_values
[i
];
294 const ARMCPRegInfo
*ri
;
296 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
301 if (ri
->type
& ARM_CP_NO_RAW
) {
304 /* Write value and confirm it reads back as written
305 * (to catch read-only registers and partially read-only
306 * registers where the incoming migration value doesn't match)
308 write_raw_cp_reg(&cpu
->env
, ri
, v
);
309 if (read_raw_cp_reg(&cpu
->env
, ri
) != v
) {
316 static void add_cpreg_to_list(gpointer key
, gpointer opaque
)
318 ARMCPU
*cpu
= opaque
;
320 const ARMCPRegInfo
*ri
;
322 regidx
= *(uint32_t *)key
;
323 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
325 if (!(ri
->type
& (ARM_CP_NO_RAW
|ARM_CP_ALIAS
))) {
326 cpu
->cpreg_indexes
[cpu
->cpreg_array_len
] = cpreg_to_kvm_id(regidx
);
327 /* The value array need not be initialized at this point */
328 cpu
->cpreg_array_len
++;
332 static void count_cpreg(gpointer key
, gpointer opaque
)
334 ARMCPU
*cpu
= opaque
;
336 const ARMCPRegInfo
*ri
;
338 regidx
= *(uint32_t *)key
;
339 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
341 if (!(ri
->type
& (ARM_CP_NO_RAW
|ARM_CP_ALIAS
))) {
342 cpu
->cpreg_array_len
++;
346 static gint
cpreg_key_compare(gconstpointer a
, gconstpointer b
)
348 uint64_t aidx
= cpreg_to_kvm_id(*(uint32_t *)a
);
349 uint64_t bidx
= cpreg_to_kvm_id(*(uint32_t *)b
);
360 void init_cpreg_list(ARMCPU
*cpu
)
362 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
363 * Note that we require cpreg_tuples[] to be sorted by key ID.
368 keys
= g_hash_table_get_keys(cpu
->cp_regs
);
369 keys
= g_list_sort(keys
, cpreg_key_compare
);
371 cpu
->cpreg_array_len
= 0;
373 g_list_foreach(keys
, count_cpreg
, cpu
);
375 arraylen
= cpu
->cpreg_array_len
;
376 cpu
->cpreg_indexes
= g_new(uint64_t, arraylen
);
377 cpu
->cpreg_values
= g_new(uint64_t, arraylen
);
378 cpu
->cpreg_vmstate_indexes
= g_new(uint64_t, arraylen
);
379 cpu
->cpreg_vmstate_values
= g_new(uint64_t, arraylen
);
380 cpu
->cpreg_vmstate_array_len
= cpu
->cpreg_array_len
;
381 cpu
->cpreg_array_len
= 0;
383 g_list_foreach(keys
, add_cpreg_to_list
, cpu
);
385 assert(cpu
->cpreg_array_len
== arraylen
);
391 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
392 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
394 * access_el3_aa32ns: Used to check AArch32 register views.
395 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
397 static CPAccessResult
access_el3_aa32ns(CPUARMState
*env
,
398 const ARMCPRegInfo
*ri
,
401 bool secure
= arm_is_secure_below_el3(env
);
403 assert(!arm_el_is_aa64(env
, 3));
405 return CP_ACCESS_TRAP_UNCATEGORIZED
;
410 static CPAccessResult
access_el3_aa32ns_aa64any(CPUARMState
*env
,
411 const ARMCPRegInfo
*ri
,
414 if (!arm_el_is_aa64(env
, 3)) {
415 return access_el3_aa32ns(env
, ri
, isread
);
420 /* Some secure-only AArch32 registers trap to EL3 if used from
421 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
422 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
423 * We assume that the .access field is set to PL1_RW.
425 static CPAccessResult
access_trap_aa32s_el1(CPUARMState
*env
,
426 const ARMCPRegInfo
*ri
,
429 if (arm_current_el(env
) == 3) {
432 if (arm_is_secure_below_el3(env
)) {
433 return CP_ACCESS_TRAP_EL3
;
435 /* This will be EL1 NS and EL2 NS, which just UNDEF */
436 return CP_ACCESS_TRAP_UNCATEGORIZED
;
439 /* Check for traps to "powerdown debug" registers, which are controlled
442 static CPAccessResult
access_tdosa(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
445 int el
= arm_current_el(env
);
447 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDOSA
)
448 && !arm_is_secure_below_el3(env
)) {
449 return CP_ACCESS_TRAP_EL2
;
451 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDOSA
)) {
452 return CP_ACCESS_TRAP_EL3
;
457 /* Check for traps to "debug ROM" registers, which are controlled
458 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
460 static CPAccessResult
access_tdra(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
463 int el
= arm_current_el(env
);
465 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDRA
)
466 && !arm_is_secure_below_el3(env
)) {
467 return CP_ACCESS_TRAP_EL2
;
469 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
470 return CP_ACCESS_TRAP_EL3
;
475 /* Check for traps to general debug registers, which are controlled
476 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
478 static CPAccessResult
access_tda(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
481 int el
= arm_current_el(env
);
483 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDA
)
484 && !arm_is_secure_below_el3(env
)) {
485 return CP_ACCESS_TRAP_EL2
;
487 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
488 return CP_ACCESS_TRAP_EL3
;
493 /* Check for traps to performance monitor registers, which are controlled
494 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
496 static CPAccessResult
access_tpm(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
499 int el
= arm_current_el(env
);
501 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
502 && !arm_is_secure_below_el3(env
)) {
503 return CP_ACCESS_TRAP_EL2
;
505 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
506 return CP_ACCESS_TRAP_EL3
;
511 static void dacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
513 ARMCPU
*cpu
= arm_env_get_cpu(env
);
515 raw_write(env
, ri
, value
);
516 tlb_flush(CPU(cpu
)); /* Flush TLB as domain not tracked in TLB */
519 static void fcse_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
521 ARMCPU
*cpu
= arm_env_get_cpu(env
);
523 if (raw_read(env
, ri
) != value
) {
524 /* Unlike real hardware the qemu TLB uses virtual addresses,
525 * not modified virtual addresses, so this causes a TLB flush.
528 raw_write(env
, ri
, value
);
532 static void contextidr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
535 ARMCPU
*cpu
= arm_env_get_cpu(env
);
537 if (raw_read(env
, ri
) != value
&& !arm_feature(env
, ARM_FEATURE_PMSA
)
538 && !extended_addresses_enabled(env
)) {
539 /* For VMSA (when not using the LPAE long descriptor page table
540 * format) this register includes the ASID, so do a TLB flush.
541 * For PMSA it is purely a process ID and no action is needed.
545 raw_write(env
, ri
, value
);
548 static void tlbiall_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
551 /* Invalidate all (TLBIALL) */
552 ARMCPU
*cpu
= arm_env_get_cpu(env
);
557 static void tlbimva_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
560 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
561 ARMCPU
*cpu
= arm_env_get_cpu(env
);
563 tlb_flush_page(CPU(cpu
), value
& TARGET_PAGE_MASK
);
566 static void tlbiasid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
569 /* Invalidate by ASID (TLBIASID) */
570 ARMCPU
*cpu
= arm_env_get_cpu(env
);
575 static void tlbimvaa_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
578 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
579 ARMCPU
*cpu
= arm_env_get_cpu(env
);
581 tlb_flush_page(CPU(cpu
), value
& TARGET_PAGE_MASK
);
584 /* IS variants of TLB operations must affect all cores */
585 static void tlbiall_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
588 CPUState
*cs
= ENV_GET_CPU(env
);
590 tlb_flush_all_cpus_synced(cs
);
593 static void tlbiasid_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
596 CPUState
*cs
= ENV_GET_CPU(env
);
598 tlb_flush_all_cpus_synced(cs
);
601 static void tlbimva_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
604 CPUState
*cs
= ENV_GET_CPU(env
);
606 tlb_flush_page_all_cpus_synced(cs
, value
& TARGET_PAGE_MASK
);
609 static void tlbimvaa_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
612 CPUState
*cs
= ENV_GET_CPU(env
);
614 tlb_flush_page_all_cpus_synced(cs
, value
& TARGET_PAGE_MASK
);
617 static void tlbiall_nsnh_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
620 CPUState
*cs
= ENV_GET_CPU(env
);
622 tlb_flush_by_mmuidx(cs
,
623 ARMMMUIdxBit_S12NSE1
|
624 ARMMMUIdxBit_S12NSE0
|
628 static void tlbiall_nsnh_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
631 CPUState
*cs
= ENV_GET_CPU(env
);
633 tlb_flush_by_mmuidx_all_cpus_synced(cs
,
634 ARMMMUIdxBit_S12NSE1
|
635 ARMMMUIdxBit_S12NSE0
|
639 static void tlbiipas2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
642 /* Invalidate by IPA. This has to invalidate any structures that
643 * contain only stage 2 translation information, but does not need
644 * to apply to structures that contain combined stage 1 and stage 2
645 * translation information.
646 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
648 CPUState
*cs
= ENV_GET_CPU(env
);
651 if (!arm_feature(env
, ARM_FEATURE_EL2
) || !(env
->cp15
.scr_el3
& SCR_NS
)) {
655 pageaddr
= sextract64(value
<< 12, 0, 40);
657 tlb_flush_page_by_mmuidx(cs
, pageaddr
, ARMMMUIdxBit_S2NS
);
660 static void tlbiipas2_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
663 CPUState
*cs
= ENV_GET_CPU(env
);
666 if (!arm_feature(env
, ARM_FEATURE_EL2
) || !(env
->cp15
.scr_el3
& SCR_NS
)) {
670 pageaddr
= sextract64(value
<< 12, 0, 40);
672 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
676 static void tlbiall_hyp_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
679 CPUState
*cs
= ENV_GET_CPU(env
);
681 tlb_flush_by_mmuidx(cs
, ARMMMUIdxBit_S1E2
);
684 static void tlbiall_hyp_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
687 CPUState
*cs
= ENV_GET_CPU(env
);
689 tlb_flush_by_mmuidx_all_cpus_synced(cs
, ARMMMUIdxBit_S1E2
);
692 static void tlbimva_hyp_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
695 CPUState
*cs
= ENV_GET_CPU(env
);
696 uint64_t pageaddr
= value
& ~MAKE_64BIT_MASK(0, 12);
698 tlb_flush_page_by_mmuidx(cs
, pageaddr
, ARMMMUIdxBit_S1E2
);
701 static void tlbimva_hyp_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
704 CPUState
*cs
= ENV_GET_CPU(env
);
705 uint64_t pageaddr
= value
& ~MAKE_64BIT_MASK(0, 12);
707 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
711 static const ARMCPRegInfo cp_reginfo
[] = {
712 /* Define the secure and non-secure FCSE identifier CP registers
713 * separately because there is no secure bank in V8 (no _EL3). This allows
714 * the secure register to be properly reset and migrated. There is also no
715 * v8 EL1 version of the register so the non-secure instance stands alone.
718 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
719 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
720 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_ns
),
721 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
722 { .name
= "FCSEIDR_S",
723 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
724 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
725 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_s
),
726 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
727 /* Define the secure and non-secure context identifier CP registers
728 * separately because there is no secure bank in V8 (no _EL3). This allows
729 * the secure register to be properly reset and migrated. In the
730 * non-secure case, the 32-bit register will have reset and migration
731 * disabled during registration as it is handled by the 64-bit instance.
733 { .name
= "CONTEXTIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
734 .opc0
= 3, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
735 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
736 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_el
[1]),
737 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
738 { .name
= "CONTEXTIDR_S", .state
= ARM_CP_STATE_AA32
,
739 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
740 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
741 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_s
),
742 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
746 static const ARMCPRegInfo not_v8_cp_reginfo
[] = {
747 /* NB: Some of these registers exist in v8 but with more precise
748 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
750 /* MMU Domain access control / MPU write buffer control */
752 .cp
= 15, .opc1
= CP_ANY
, .crn
= 3, .crm
= CP_ANY
, .opc2
= CP_ANY
,
753 .access
= PL1_RW
, .resetvalue
= 0,
754 .writefn
= dacr_write
, .raw_writefn
= raw_write
,
755 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dacr_s
),
756 offsetoflow32(CPUARMState
, cp15
.dacr_ns
) } },
757 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
758 * For v6 and v5, these mappings are overly broad.
760 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 0,
761 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
762 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 1,
763 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
764 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 4,
765 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
766 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 8,
767 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
768 /* Cache maintenance ops; some of this space may be overridden later. */
769 { .name
= "CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
770 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
771 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
},
775 static const ARMCPRegInfo not_v6_cp_reginfo
[] = {
776 /* Not all pre-v6 cores implemented this WFI, so this is slightly
779 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
780 .access
= PL1_W
, .type
= ARM_CP_WFI
},
784 static const ARMCPRegInfo not_v7_cp_reginfo
[] = {
785 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
786 * is UNPREDICTABLE; we choose to NOP as most implementations do).
788 { .name
= "WFI_v6", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
789 .access
= PL1_W
, .type
= ARM_CP_WFI
},
790 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
791 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
792 * OMAPCP will override this space.
794 { .name
= "DLOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 0,
795 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_data
),
797 { .name
= "ILOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 1,
798 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_insn
),
800 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
801 { .name
= "DUMMY", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= CP_ANY
,
802 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
804 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
805 * implementing it as RAZ means the "debug architecture version" bits
806 * will read as a reserved value, which should cause Linux to not try
807 * to use the debug hardware.
809 { .name
= "DBGDIDR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 0,
810 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
811 /* MMU TLB control. Note that the wildcarding means we cover not just
812 * the unified TLB ops but also the dside/iside/inner-shareable variants.
814 { .name
= "TLBIALL", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
815 .opc1
= CP_ANY
, .opc2
= 0, .access
= PL1_W
, .writefn
= tlbiall_write
,
816 .type
= ARM_CP_NO_RAW
},
817 { .name
= "TLBIMVA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
818 .opc1
= CP_ANY
, .opc2
= 1, .access
= PL1_W
, .writefn
= tlbimva_write
,
819 .type
= ARM_CP_NO_RAW
},
820 { .name
= "TLBIASID", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
821 .opc1
= CP_ANY
, .opc2
= 2, .access
= PL1_W
, .writefn
= tlbiasid_write
,
822 .type
= ARM_CP_NO_RAW
},
823 { .name
= "TLBIMVAA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
824 .opc1
= CP_ANY
, .opc2
= 3, .access
= PL1_W
, .writefn
= tlbimvaa_write
,
825 .type
= ARM_CP_NO_RAW
},
826 { .name
= "PRRR", .cp
= 15, .crn
= 10, .crm
= 2,
827 .opc1
= 0, .opc2
= 0, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
828 { .name
= "NMRR", .cp
= 15, .crn
= 10, .crm
= 2,
829 .opc1
= 0, .opc2
= 1, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
833 static void cpacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
838 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
839 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
840 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
841 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
842 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
844 if (arm_feature(env
, ARM_FEATURE_VFP
)) {
845 /* VFP coprocessor: cp10 & cp11 [23:20] */
846 mask
|= (1 << 31) | (1 << 30) | (0xf << 20);
848 if (!arm_feature(env
, ARM_FEATURE_NEON
)) {
849 /* ASEDIS [31] bit is RAO/WI */
853 /* VFPv3 and upwards with NEON implement 32 double precision
854 * registers (D0-D31).
856 if (!arm_feature(env
, ARM_FEATURE_NEON
) ||
857 !arm_feature(env
, ARM_FEATURE_VFP3
)) {
858 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
864 env
->cp15
.cpacr_el1
= value
;
867 static void cpacr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
869 /* Call cpacr_write() so that we reset with the correct RAO bits set
870 * for our CPU features.
872 cpacr_write(env
, ri
, 0);
875 static CPAccessResult
cpacr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
878 if (arm_feature(env
, ARM_FEATURE_V8
)) {
879 /* Check if CPACR accesses are to be trapped to EL2 */
880 if (arm_current_el(env
) == 1 &&
881 (env
->cp15
.cptr_el
[2] & CPTR_TCPAC
) && !arm_is_secure(env
)) {
882 return CP_ACCESS_TRAP_EL2
;
883 /* Check if CPACR accesses are to be trapped to EL3 */
884 } else if (arm_current_el(env
) < 3 &&
885 (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
886 return CP_ACCESS_TRAP_EL3
;
893 static CPAccessResult
cptr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
896 /* Check if CPTR accesses are set to trap to EL3 */
897 if (arm_current_el(env
) == 2 && (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
898 return CP_ACCESS_TRAP_EL3
;
904 static const ARMCPRegInfo v6_cp_reginfo
[] = {
905 /* prefetch by MVA in v6, NOP in v7 */
906 { .name
= "MVA_prefetch",
907 .cp
= 15, .crn
= 7, .crm
= 13, .opc1
= 0, .opc2
= 1,
908 .access
= PL1_W
, .type
= ARM_CP_NOP
},
909 /* We need to break the TB after ISB to execute self-modifying code
910 * correctly and also to take any pending interrupts immediately.
911 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
913 { .name
= "ISB", .cp
= 15, .crn
= 7, .crm
= 5, .opc1
= 0, .opc2
= 4,
914 .access
= PL0_W
, .type
= ARM_CP_NO_RAW
, .writefn
= arm_cp_write_ignore
},
915 { .name
= "DSB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 4,
916 .access
= PL0_W
, .type
= ARM_CP_NOP
},
917 { .name
= "DMB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 5,
918 .access
= PL0_W
, .type
= ARM_CP_NOP
},
919 { .name
= "IFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 2,
921 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ifar_s
),
922 offsetof(CPUARMState
, cp15
.ifar_ns
) },
924 /* Watchpoint Fault Address Register : should actually only be present
925 * for 1136, 1176, 11MPCore.
927 { .name
= "WFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 1,
928 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0, },
929 { .name
= "CPACR", .state
= ARM_CP_STATE_BOTH
, .opc0
= 3,
930 .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 2, .accessfn
= cpacr_access
,
931 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.cpacr_el1
),
932 .resetfn
= cpacr_reset
, .writefn
= cpacr_write
},
936 /* Definitions for the PMU registers */
937 #define PMCRN_MASK 0xf800
938 #define PMCRN_SHIFT 11
943 static inline uint32_t pmu_num_counters(CPUARMState
*env
)
945 return (env
->cp15
.c9_pmcr
& PMCRN_MASK
) >> PMCRN_SHIFT
;
948 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
949 static inline uint64_t pmu_counter_mask(CPUARMState
*env
)
951 return (1 << 31) | ((1 << pmu_num_counters(env
)) - 1);
954 static CPAccessResult
pmreg_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
957 /* Performance monitor registers user accessibility is controlled
958 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
959 * trapping to EL2 or EL3 for other accesses.
961 int el
= arm_current_el(env
);
963 if (el
== 0 && !(env
->cp15
.c9_pmuserenr
& 1)) {
964 return CP_ACCESS_TRAP
;
966 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
967 && !arm_is_secure_below_el3(env
)) {
968 return CP_ACCESS_TRAP_EL2
;
970 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
971 return CP_ACCESS_TRAP_EL3
;
977 static CPAccessResult
pmreg_access_xevcntr(CPUARMState
*env
,
978 const ARMCPRegInfo
*ri
,
981 /* ER: event counter read trap control */
982 if (arm_feature(env
, ARM_FEATURE_V8
)
983 && arm_current_el(env
) == 0
984 && (env
->cp15
.c9_pmuserenr
& (1 << 3)) != 0
989 return pmreg_access(env
, ri
, isread
);
992 static CPAccessResult
pmreg_access_swinc(CPUARMState
*env
,
993 const ARMCPRegInfo
*ri
,
996 /* SW: software increment write trap control */
997 if (arm_feature(env
, ARM_FEATURE_V8
)
998 && arm_current_el(env
) == 0
999 && (env
->cp15
.c9_pmuserenr
& (1 << 1)) != 0
1001 return CP_ACCESS_OK
;
1004 return pmreg_access(env
, ri
, isread
);
1007 #ifndef CONFIG_USER_ONLY
1009 static CPAccessResult
pmreg_access_selr(CPUARMState
*env
,
1010 const ARMCPRegInfo
*ri
,
1013 /* ER: event counter read trap control */
1014 if (arm_feature(env
, ARM_FEATURE_V8
)
1015 && arm_current_el(env
) == 0
1016 && (env
->cp15
.c9_pmuserenr
& (1 << 3)) != 0) {
1017 return CP_ACCESS_OK
;
1020 return pmreg_access(env
, ri
, isread
);
1023 static CPAccessResult
pmreg_access_ccntr(CPUARMState
*env
,
1024 const ARMCPRegInfo
*ri
,
1027 /* CR: cycle counter read trap control */
1028 if (arm_feature(env
, ARM_FEATURE_V8
)
1029 && arm_current_el(env
) == 0
1030 && (env
->cp15
.c9_pmuserenr
& (1 << 2)) != 0
1032 return CP_ACCESS_OK
;
1035 return pmreg_access(env
, ri
, isread
);
1038 static inline bool arm_ccnt_enabled(CPUARMState
*env
)
1040 /* This does not support checking PMCCFILTR_EL0 register */
1042 if (!(env
->cp15
.c9_pmcr
& PMCRE
) || !(env
->cp15
.c9_pmcnten
& (1 << 31))) {
1049 void pmccntr_sync(CPUARMState
*env
)
1051 uint64_t temp_ticks
;
1053 temp_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
1054 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
1056 if (env
->cp15
.c9_pmcr
& PMCRD
) {
1057 /* Increment once every 64 processor clock cycles */
1061 if (arm_ccnt_enabled(env
)) {
1062 env
->cp15
.c15_ccnt
= temp_ticks
- env
->cp15
.c15_ccnt
;
1066 static void pmcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1071 if (value
& PMCRC
) {
1072 /* The counter has been reset */
1073 env
->cp15
.c15_ccnt
= 0;
1076 /* only the DP, X, D and E bits are writable */
1077 env
->cp15
.c9_pmcr
&= ~0x39;
1078 env
->cp15
.c9_pmcr
|= (value
& 0x39);
1083 static uint64_t pmccntr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1085 uint64_t total_ticks
;
1087 if (!arm_ccnt_enabled(env
)) {
1088 /* Counter is disabled, do not change value */
1089 return env
->cp15
.c15_ccnt
;
1092 total_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
1093 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
1095 if (env
->cp15
.c9_pmcr
& PMCRD
) {
1096 /* Increment once every 64 processor clock cycles */
1099 return total_ticks
- env
->cp15
.c15_ccnt
;
1102 static void pmselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1105 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1106 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1107 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1110 env
->cp15
.c9_pmselr
= value
& 0x1f;
1113 static void pmccntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1116 uint64_t total_ticks
;
1118 if (!arm_ccnt_enabled(env
)) {
1119 /* Counter is disabled, set the absolute value */
1120 env
->cp15
.c15_ccnt
= value
;
1124 total_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
1125 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
1127 if (env
->cp15
.c9_pmcr
& PMCRD
) {
1128 /* Increment once every 64 processor clock cycles */
1131 env
->cp15
.c15_ccnt
= total_ticks
- value
;
1134 static void pmccntr_write32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1137 uint64_t cur_val
= pmccntr_read(env
, NULL
);
1139 pmccntr_write(env
, ri
, deposit64(cur_val
, 0, 32, value
));
1142 #else /* CONFIG_USER_ONLY */
1144 void pmccntr_sync(CPUARMState
*env
)
1150 static void pmccfiltr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1154 env
->cp15
.pmccfiltr_el0
= value
& 0xfc000000;
1158 static void pmcntenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1161 value
&= pmu_counter_mask(env
);
1162 env
->cp15
.c9_pmcnten
|= value
;
1165 static void pmcntenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1168 value
&= pmu_counter_mask(env
);
1169 env
->cp15
.c9_pmcnten
&= ~value
;
1172 static void pmovsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1175 env
->cp15
.c9_pmovsr
&= ~value
;
/* Write PMXEVTYPER: event type for the counter selected by PMSELR.SEL. */
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     * SEL == 0x1f selects the cycle counter, whose filter is PMCCFILTR.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}
1190 static uint64_t pmxevtyper_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1192 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1193 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1195 if (env
->cp15
.c9_pmselr
== 0x1f) {
1196 return env
->cp15
.pmccfiltr_el0
;
1202 static void pmuserenr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1205 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1206 env
->cp15
.c9_pmuserenr
= value
& 0xf;
1208 env
->cp15
.c9_pmuserenr
= value
& 1;
1212 static void pmintenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1215 /* We have no event counters so only the C bit can be changed */
1216 value
&= pmu_counter_mask(env
);
1217 env
->cp15
.c9_pminten
|= value
;
1220 static void pmintenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1223 value
&= pmu_counter_mask(env
);
1224 env
->cp15
.c9_pminten
&= ~value
;
/* Write VBAR: vector base address register. */
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
/* Write SCR/SCR_EL3: mask off feature bits that do not exist on this CPU. */
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
/* Read CCSIDR: cache size ID for the cache level/type currently selected
 * by CSSELR.
 */
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * access's security state.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
/* Write CSSELR: only the Level and InD fields (bits [3:0]) are writable. */
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
/* Read ISR_EL1: reflect the currently pending interrupt lines.
 * NOTE(review): the two flag-setting statements were illegible in the
 * extracted source and were reconstructed (CPSR_I for IRQ, CPSR_F for
 * FIQ) -- confirm against the original.
 */
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
/* System register descriptions common to all ARMv7 CPUs.
 * NOTE(review): a handful of field lines were illegible in the extracted
 * source and were reconstructed from the ARM-recommended PMU register
 * layout -- verify against the original before relying on exact fields.
 */
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0x0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
/* Registers present only on v7 multiprocessor cores: the Inner Shareable
 * variants of the 32-bit TLB invalidate operations.
 */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
/* Write TEECR (ThumbEE configuration).
 * NOTE(review): the function body was not visible in the extracted source;
 * reconstructed as keeping only the XED bit (bit 0) -- confirm against the
 * original before relying on this.
 */
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}
1529 static CPAccessResult
teehbr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1532 if (arm_current_el(env
) == 0 && (env
->teecr
& 1)) {
1533 return CP_ACCESS_TRAP
;
1535 return CP_ACCESS_OK
;
/* ThumbEE (T2EE) registers: TEECR (config) and TEEHBR (handler base),
 * the latter gated on TEECR.XED via teehbr_access.
 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* v6K thread/process ID registers (TPIDR*), with banked secure/non-secure
 * AArch32 views aliasing the AArch64 TPIDR_ELx state.
 * NOTE(review): the .access lines and the array terminator were illegible
 * in the extracted source and were reconstructed -- verify.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1581 #ifndef CONFIG_USER_ONLY
/* Access check for CNTFRQ.
 * NOTE(review): the switch skeleton (case labels/breaks) was illegible in
 * the extracted source and was reconstructed -- verify.
 */
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    default:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
/* Access check for the physical/virtual counter registers (CNT[PV]CT). */
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* EL2 can further hide the physical counter from NS EL0/EL1 via
     * CNTHCTL_EL2.EL1PCTEN (bit 0).
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
/* Access check for the per-timer registers (CTL/CVAL/TVAL). */
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* EL2 can hide the physical timer from NS EL0/EL1 via
     * CNTHCTL_EL2.EL1PCEN (bit 1).
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
/* Access check for CNTPCT: physical counter reads. */
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}
/* Access check for CNTVCT: virtual counter reads. */
static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}
/* Access check for the EL1 physical timer registers. */
static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}
/* Access check for the virtual timer registers. */
static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
/* Access check for the secure physical timer.
 * NOTE(review): the switch case labels were illegible in the extracted
 * source and were reconstructed -- verify.
 */
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
/* Current generic-timer count: QEMU virtual time scaled down to ticks. */
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
/* Recompute a generic timer's ISTATUS, output irq line and QEMU timer
 * deadline after any state change (enable, cval/tval write, cntvoff write).
 * NOTE(review): the enable check, local declarations and the disabled-branch
 * ISTATUS clear were illegible in the extracted source and were
 * reconstructed -- verify.
 */
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* irq asserted when ISTATUS is set and IMASK (bit 1) is clear */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
/* Generic per-timer reset: cancel the backing QEMU timer. */
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}
/* Read CNTPCT: the physical counter has no offset applied. */
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}
/* Read CNTVCT: the virtual counter is the physical count minus CNTVOFF. */
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
/* Write CNTx_CVAL: store the compare value, then reschedule the backing
 * timer and refresh ISTATUS/irq state.
 */
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx, uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1790 static uint64_t gt_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1793 uint64_t offset
= timeridx
== GTIMER_VIRT ? env
->cp15
.cntvoff_el2
: 0;
1795 return (uint32_t)(env
->cp15
.c14_timer
[timeridx
].cval
-
1796 (gt_get_countervalue(env
) - offset
));
1799 static void gt_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1803 uint64_t offset
= timeridx
== GTIMER_VIRT ? env
->cp15
.cntvoff_el2
: 0;
1805 trace_arm_gt_tval_write(timeridx
, value
);
1806 env
->cp15
.c14_timer
[timeridx
].cval
= gt_get_countervalue(env
) - offset
+
1807 sextract64(value
, 0, 32);
1808 gt_recalc_timer(arm_env_get_cpu(env
), timeridx
);
/* Write CNTx_CTL: only ENABLE (bit 0) and IMASK (bit 1) are writable;
 * ISTATUS (bit 2) is read-only and preserved.
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
/* Reset handler for the EL1 physical timer. */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}
/* CVAL write for the EL1 physical timer. */
static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}
/* TVAL read for the EL1 physical timer. */
static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}
/* TVAL write for the EL1 physical timer. */
static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}
/* CTL write for the EL1 physical timer. */
static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
/* Reset handler for the virtual timer. */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}
/* CVAL write for the virtual timer. */
static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}
/* TVAL read for the virtual timer. */
static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}
/* TVAL write for the virtual timer. */
static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}
/* CTL write for the virtual timer. */
static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
/* Write CNTVOFF_EL2: changing the virtual offset moves the virtual
 * counter, so the virtual timer must be rescheduled.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
/* Reset handler for the EL2 (hyp) timer. */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}
/* CVAL write for the EL2 (hyp) timer. */
static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}
/* TVAL read for the EL2 (hyp) timer. */
static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}
/* TVAL write for the EL2 (hyp) timer. */
static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}
/* CTL write for the EL2 (hyp) timer. */
static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
/* Reset handler for the secure physical timer. */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}
/* CVAL write for the secure physical timer. */
static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}
/* TVAL read for the secure physical timer. */
static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}
/* TVAL write for the secure physical timer. */
static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}
/* CTL write for the secure physical timer. */
static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
/* QEMUTimer expiry callback for the EL1 physical timer. */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}
/* QEMUTimer expiry callback for the virtual timer. */
void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}
/* QEMUTimer expiry callback for the EL2 (hyp) timer. */
void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}
/* QEMUTimer expiry callback for the secure physical timer. */
void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
1984 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
1985 /* Note that CNTFRQ is purely reads-as-written for the benefit
1986 * of software; writing it doesn't actually change the timer frequency.
1987 * Our reset value matches the fixed frequency we implement the timer at.
1989 { .name
= "CNTFRQ", .cp
= 15, .crn
= 14, .crm
= 0, .opc1
= 0, .opc2
= 0,
1990 .type
= ARM_CP_ALIAS
,
1991 .access
= PL1_RW
| PL0_R
, .accessfn
= gt_cntfrq_access
,
1992 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c14_cntfrq
),
1994 { .name
= "CNTFRQ_EL0", .state
= ARM_CP_STATE_AA64
,
1995 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 0,
1996 .access
= PL1_RW
| PL0_R
, .accessfn
= gt_cntfrq_access
,
1997 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntfrq
),
1998 .resetvalue
= (1000 * 1000 * 1000) / GTIMER_SCALE
,
2000 /* overall control: mostly access permissions */
2001 { .name
= "CNTKCTL", .state
= ARM_CP_STATE_BOTH
,
2002 .opc0
= 3, .opc1
= 0, .crn
= 14, .crm
= 1, .opc2
= 0,
2004 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntkctl
),
2007 /* per-timer control */
2008 { .name
= "CNTP_CTL", .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 1,
2009 .secure
= ARM_CP_SECSTATE_NS
,
2010 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL1_RW
| PL0_R
,
2011 .accessfn
= gt_ptimer_access
,
2012 .fieldoffset
= offsetoflow32(CPUARMState
,
2013 cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
2014 .writefn
= gt_phys_ctl_write
, .raw_writefn
= raw_write
,
2016 { .name
= "CNTP_CTL_S",
2017 .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 1,
2018 .secure
= ARM_CP_SECSTATE_S
,
2019 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL1_RW
| PL0_R
,
2020 .accessfn
= gt_ptimer_access
,
2021 .fieldoffset
= offsetoflow32(CPUARMState
,
2022 cp15
.c14_timer
[GTIMER_SEC
].ctl
),
2023 .writefn
= gt_sec_ctl_write
, .raw_writefn
= raw_write
,
2025 { .name
= "CNTP_CTL_EL0", .state
= ARM_CP_STATE_AA64
,
2026 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 1,
2027 .type
= ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2028 .accessfn
= gt_ptimer_access
,
2029 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
2031 .writefn
= gt_phys_ctl_write
, .raw_writefn
= raw_write
,
2033 { .name
= "CNTV_CTL", .cp
= 15, .crn
= 14, .crm
= 3, .opc1
= 0, .opc2
= 1,
2034 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL1_RW
| PL0_R
,
2035 .accessfn
= gt_vtimer_access
,
2036 .fieldoffset
= offsetoflow32(CPUARMState
,
2037 cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
2038 .writefn
= gt_virt_ctl_write
, .raw_writefn
= raw_write
,
2040 { .name
= "CNTV_CTL_EL0", .state
= ARM_CP_STATE_AA64
,
2041 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 1,
2042 .type
= ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2043 .accessfn
= gt_vtimer_access
,
2044 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
2046 .writefn
= gt_virt_ctl_write
, .raw_writefn
= raw_write
,
2048 /* TimerValue views: a 32 bit downcounting view of the underlying state */
2049 { .name
= "CNTP_TVAL", .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 0,
2050 .secure
= ARM_CP_SECSTATE_NS
,
2051 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2052 .accessfn
= gt_ptimer_access
,
2053 .readfn
= gt_phys_tval_read
, .writefn
= gt_phys_tval_write
,
2055 { .name
= "CNTP_TVAL_S",
2056 .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 0,
2057 .secure
= ARM_CP_SECSTATE_S
,
2058 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2059 .accessfn
= gt_ptimer_access
,
2060 .readfn
= gt_sec_tval_read
, .writefn
= gt_sec_tval_write
,
2062 { .name
= "CNTP_TVAL_EL0", .state
= ARM_CP_STATE_AA64
,
2063 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 0,
2064 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2065 .accessfn
= gt_ptimer_access
, .resetfn
= gt_phys_timer_reset
,
2066 .readfn
= gt_phys_tval_read
, .writefn
= gt_phys_tval_write
,
2068 { .name
= "CNTV_TVAL", .cp
= 15, .crn
= 14, .crm
= 3, .opc1
= 0, .opc2
= 0,
2069 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2070 .accessfn
= gt_vtimer_access
,
2071 .readfn
= gt_virt_tval_read
, .writefn
= gt_virt_tval_write
,
2073 { .name
= "CNTV_TVAL_EL0", .state
= ARM_CP_STATE_AA64
,
2074 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 0,
2075 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
2076 .accessfn
= gt_vtimer_access
, .resetfn
= gt_virt_timer_reset
,
2077 .readfn
= gt_virt_tval_read
, .writefn
= gt_virt_tval_write
,
2079 /* The counter itself */
2080 { .name
= "CNTPCT", .cp
= 15, .crm
= 14, .opc1
= 0,
2081 .access
= PL0_R
, .type
= ARM_CP_64BIT
| ARM_CP_NO_RAW
| ARM_CP_IO
,
2082 .accessfn
= gt_pct_access
,
2083 .readfn
= gt_cnt_read
, .resetfn
= arm_cp_reset_ignore
,
2085 { .name
= "CNTPCT_EL0", .state
= ARM_CP_STATE_AA64
,
2086 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 1,
2087 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
2088 .accessfn
= gt_pct_access
, .readfn
= gt_cnt_read
,
2090 { .name
= "CNTVCT", .cp
= 15, .crm
= 14, .opc1
= 1,
2091 .access
= PL0_R
, .type
= ARM_CP_64BIT
| ARM_CP_NO_RAW
| ARM_CP_IO
,
2092 .accessfn
= gt_vct_access
,
2093 .readfn
= gt_virt_cnt_read
, .resetfn
= arm_cp_reset_ignore
,
2095 { .name
= "CNTVCT_EL0", .state
= ARM_CP_STATE_AA64
,
2096 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 2,
2097 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
2098 .accessfn
= gt_vct_access
, .readfn
= gt_virt_cnt_read
,
2100 /* Comparison value, indicating when the timer goes off */
2101 { .name
= "CNTP_CVAL", .cp
= 15, .crm
= 14, .opc1
= 2,
2102 .secure
= ARM_CP_SECSTATE_NS
,
2103 .access
= PL1_RW
| PL0_R
,
2104 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
2105 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
2106 .accessfn
= gt_ptimer_access
,
2107 .writefn
= gt_phys_cval_write
, .raw_writefn
= raw_write
,
2109 { .name
= "CNTP_CVAL_S", .cp
= 15, .crm
= 14, .opc1
= 2,
2110 .secure
= ARM_CP_SECSTATE_S
,
2111 .access
= PL1_RW
| PL0_R
,
2112 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
2113 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].cval
),
2114 .accessfn
= gt_ptimer_access
,
2115 .writefn
= gt_sec_cval_write
, .raw_writefn
= raw_write
,
2117 { .name
= "CNTP_CVAL_EL0", .state
= ARM_CP_STATE_AA64
,
2118 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 2,
2119 .access
= PL1_RW
| PL0_R
,
2121 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
2122 .resetvalue
= 0, .accessfn
= gt_ptimer_access
,
2123 .writefn
= gt_phys_cval_write
, .raw_writefn
= raw_write
,
2125 { .name
= "CNTV_CVAL", .cp
= 15, .crm
= 14, .opc1
= 3,
2126 .access
= PL1_RW
| PL0_R
,
2127 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
2128 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
2129 .accessfn
= gt_vtimer_access
,
2130 .writefn
= gt_virt_cval_write
, .raw_writefn
= raw_write
,
2132 { .name
= "CNTV_CVAL_EL0", .state
= ARM_CP_STATE_AA64
,
2133 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 2,
2134 .access
= PL1_RW
| PL0_R
,
2136 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
2137 .resetvalue
= 0, .accessfn
= gt_vtimer_access
,
2138 .writefn
= gt_virt_cval_write
, .raw_writefn
= raw_write
,
2140 /* Secure timer -- this is actually restricted to only EL3
2141 * and configurably Secure-EL1 via the accessfn.
2143 { .name
= "CNTPS_TVAL_EL1", .state
= ARM_CP_STATE_AA64
,
2144 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 0,
2145 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
,
2146 .accessfn
= gt_stimer_access
,
2147 .readfn
= gt_sec_tval_read
,
2148 .writefn
= gt_sec_tval_write
,
2149 .resetfn
= gt_sec_timer_reset
,
2151 { .name
= "CNTPS_CTL_EL1", .state
= ARM_CP_STATE_AA64
,
2152 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 1,
2153 .type
= ARM_CP_IO
, .access
= PL1_RW
,
2154 .accessfn
= gt_stimer_access
,
2155 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].ctl
),
2157 .writefn
= gt_sec_ctl_write
, .raw_writefn
= raw_write
,
2159 { .name
= "CNTPS_CVAL_EL1", .state
= ARM_CP_STATE_AA64
,
2160 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 2,
2161 .type
= ARM_CP_IO
, .access
= PL1_RW
,
2162 .accessfn
= gt_stimer_access
,
2163 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].cval
),
2164 .writefn
= gt_sec_cval_write
, .raw_writefn
= raw_write
,
2171 /* In user-mode most of the generic timer registers are inaccessible
2172 * however modern kernels (4.12+) allow access to cntvct_el0
2175 static uint64_t gt_virt_cnt_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2177 /* Currently we have no support for QEMUTimer in linux-user so we
2178 * can't call gt_get_countervalue(env), instead we directly
2179 * call the lower level functions.
2181 return cpu_get_clock() / GTIMER_SCALE
;
2184 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
2185 { .name
= "CNTFRQ_EL0", .state
= ARM_CP_STATE_AA64
,
2186 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 0,
2187 .type
= ARM_CP_CONST
, .access
= PL0_R
/* no PL1_RW in linux-user */,
2188 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntfrq
),
2189 .resetvalue
= NANOSECONDS_PER_SECOND
/ GTIMER_SCALE
,
2191 { .name
= "CNTVCT_EL0", .state
= ARM_CP_STATE_AA64
,
2192 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 2,
2193 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
2194 .readfn
= gt_virt_cnt_read
,
2201 static void par_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
2203 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
2204 raw_write(env
, ri
, value
);
2205 } else if (arm_feature(env
, ARM_FEATURE_V7
)) {
2206 raw_write(env
, ri
, value
& 0xfffff6ff);
2208 raw_write(env
, ri
, value
& 0xfffff1ff);
2212 #ifndef CONFIG_USER_ONLY
2213 /* get_phys_addr() isn't present for user-mode-only targets */
2215 static CPAccessResult
ats_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2219 /* The ATS12NSO* operations must trap to EL3 if executed in
2220 * Secure EL1 (which can only happen if EL3 is AArch64).
2221 * They are simply UNDEF if executed from NS EL1.
2222 * They function normally from EL2 or EL3.
2224 if (arm_current_el(env
) == 1) {
2225 if (arm_is_secure_below_el3(env
)) {
2226 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3
;
2228 return CP_ACCESS_TRAP_UNCATEGORIZED
;
2231 return CP_ACCESS_OK
;
2234 static uint64_t do_ats_write(CPUARMState
*env
, uint64_t value
,
2235 MMUAccessType access_type
, ARMMMUIdx mmu_idx
)
2238 target_ulong page_size
;
2242 bool format64
= false;
2243 MemTxAttrs attrs
= {};
2244 ARMMMUFaultInfo fi
= {};
2245 ARMCacheAttrs cacheattrs
= {};
2247 ret
= get_phys_addr(env
, value
, access_type
, mmu_idx
, &phys_addr
, &attrs
,
2248 &prot
, &page_size
, &fi
, &cacheattrs
);
2252 } else if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
2255 * * TTBCR.EAE determines whether the result is returned using the
2256 * 32-bit or the 64-bit PAR format
2257 * * Instructions executed in Hyp mode always use the 64bit format
2259 * ATS1S2NSOxx uses the 64bit format if any of the following is true:
2260 * * The Non-secure TTBCR.EAE bit is set to 1
2261 * * The implementation includes EL2, and the value of HCR.VM is 1
2263 * ATS1Hx always uses the 64bit format (not supported yet).
2265 format64
= arm_s1_regime_using_lpae_format(env
, mmu_idx
);
2267 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
2268 if (mmu_idx
== ARMMMUIdx_S12NSE0
|| mmu_idx
== ARMMMUIdx_S12NSE1
) {
2269 format64
|= env
->cp15
.hcr_el2
& HCR_VM
;
2271 format64
|= arm_current_el(env
) == 2;
2277 /* Create a 64-bit PAR */
2278 par64
= (1 << 11); /* LPAE bit always set */
2280 par64
|= phys_addr
& ~0xfffULL
;
2281 if (!attrs
.secure
) {
2282 par64
|= (1 << 9); /* NS */
2284 par64
|= (uint64_t)cacheattrs
.attrs
<< 56; /* ATTR */
2285 par64
|= cacheattrs
.shareability
<< 7; /* SH */
2287 uint32_t fsr
= arm_fi_to_lfsc(&fi
);
2290 par64
|= (fsr
& 0x3f) << 1; /* FS */
2291 /* Note that S2WLK and FSTAGE are always zero, because we don't
2292 * implement virtualization and therefore there can't be a stage 2
2297 /* fsr is a DFSR/IFSR value for the short descriptor
2298 * translation table format (with WnR always clear).
2299 * Convert it to a 32-bit PAR.
2302 /* We do not set any attribute bits in the PAR */
2303 if (page_size
== (1 << 24)
2304 && arm_feature(env
, ARM_FEATURE_V7
)) {
2305 par64
= (phys_addr
& 0xff000000) | (1 << 1);
2307 par64
= phys_addr
& 0xfffff000;
2309 if (!attrs
.secure
) {
2310 par64
|= (1 << 9); /* NS */
2313 uint32_t fsr
= arm_fi_to_sfsc(&fi
);
2315 par64
= ((fsr
& (1 << 10)) >> 5) | ((fsr
& (1 << 12)) >> 6) |
2316 ((fsr
& 0xf) << 1) | 1;
2322 static void ats_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
2324 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
2327 int el
= arm_current_el(env
);
2328 bool secure
= arm_is_secure_below_el3(env
);
2330 switch (ri
->opc2
& 6) {
2332 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2335 mmu_idx
= ARMMMUIdx_S1E3
;
2338 mmu_idx
= ARMMMUIdx_S1NSE1
;
2341 mmu_idx
= secure ? ARMMMUIdx_S1SE1
: ARMMMUIdx_S1NSE1
;
2344 g_assert_not_reached();
2348 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2351 mmu_idx
= ARMMMUIdx_S1SE0
;
2354 mmu_idx
= ARMMMUIdx_S1NSE0
;
2357 mmu_idx
= secure ? ARMMMUIdx_S1SE0
: ARMMMUIdx_S1NSE0
;
2360 g_assert_not_reached();
2364 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2365 mmu_idx
= ARMMMUIdx_S12NSE1
;
2368 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2369 mmu_idx
= ARMMMUIdx_S12NSE0
;
2372 g_assert_not_reached();
2375 par64
= do_ats_write(env
, value
, access_type
, mmu_idx
);
2377 A32_BANKED_CURRENT_REG_SET(env
, par
, par64
);
2380 static void ats1h_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2383 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
2386 par64
= do_ats_write(env
, value
, access_type
, ARMMMUIdx_S2NS
);
2388 A32_BANKED_CURRENT_REG_SET(env
, par
, par64
);
2391 static CPAccessResult
at_s1e2_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2394 if (arm_current_el(env
) == 3 && !(env
->cp15
.scr_el3
& SCR_NS
)) {
2395 return CP_ACCESS_TRAP
;
2397 return CP_ACCESS_OK
;
2400 static void ats_write64(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2403 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
2405 int secure
= arm_is_secure_below_el3(env
);
2407 switch (ri
->opc2
& 6) {
2410 case 0: /* AT S1E1R, AT S1E1W */
2411 mmu_idx
= secure ? ARMMMUIdx_S1SE1
: ARMMMUIdx_S1NSE1
;
2413 case 4: /* AT S1E2R, AT S1E2W */
2414 mmu_idx
= ARMMMUIdx_S1E2
;
2416 case 6: /* AT S1E3R, AT S1E3W */
2417 mmu_idx
= ARMMMUIdx_S1E3
;
2420 g_assert_not_reached();
2423 case 2: /* AT S1E0R, AT S1E0W */
2424 mmu_idx
= secure ? ARMMMUIdx_S1SE0
: ARMMMUIdx_S1NSE0
;
2426 case 4: /* AT S12E1R, AT S12E1W */
2427 mmu_idx
= secure ? ARMMMUIdx_S1SE1
: ARMMMUIdx_S12NSE1
;
2429 case 6: /* AT S12E0R, AT S12E0W */
2430 mmu_idx
= secure ? ARMMMUIdx_S1SE0
: ARMMMUIdx_S12NSE0
;
2433 g_assert_not_reached();
2436 env
->cp15
.par_el
[1] = do_ats_write(env
, value
, access_type
, mmu_idx
);
2440 static const ARMCPRegInfo vapa_cp_reginfo
[] = {
2441 { .name
= "PAR", .cp
= 15, .crn
= 7, .crm
= 4, .opc1
= 0, .opc2
= 0,
2442 .access
= PL1_RW
, .resetvalue
= 0,
2443 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.par_s
),
2444 offsetoflow32(CPUARMState
, cp15
.par_ns
) },
2445 .writefn
= par_write
},
2446 #ifndef CONFIG_USER_ONLY
2447 /* This underdecoding is safe because the reginfo is NO_RAW. */
2448 { .name
= "ATS", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= CP_ANY
,
2449 .access
= PL1_W
, .accessfn
= ats_access
,
2450 .writefn
= ats_write
, .type
= ARM_CP_NO_RAW
},
/* Return basic MPU access permission bits: compress the extended
 * format (eight 4-bit fields, low 2 bits used) into the basic format
 * (eight contiguous 2-bit fields).
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        /* Take original bits [2i+1:2i] and place them at [i+1:i] */
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}
/* Pad basic MPU access permission bits to extended format: expand the
 * basic format (eight contiguous 2-bit fields) into the extended
 * format (eight 4-bit fields, low 2 bits used). Inverse of
 * simple_mpu_ap_bits().
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        /* Take basic bits [i+1:i] and place them at [2i+1:2i] */
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
2485 static void pmsav5_data_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2488 env
->cp15
.pmsav5_data_ap
= extended_mpu_ap_bits(value
);
2491 static uint64_t pmsav5_data_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2493 return simple_mpu_ap_bits(env
->cp15
.pmsav5_data_ap
);
2496 static void pmsav5_insn_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2499 env
->cp15
.pmsav5_insn_ap
= extended_mpu_ap_bits(value
);
2502 static uint64_t pmsav5_insn_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2504 return simple_mpu_ap_bits(env
->cp15
.pmsav5_insn_ap
);
2507 static uint64_t pmsav7_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2509 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
2515 u32p
+= env
->pmsav7
.rnr
[M_REG_NS
];
2519 static void pmsav7_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2522 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2523 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
2529 u32p
+= env
->pmsav7
.rnr
[M_REG_NS
];
2530 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
2534 static void pmsav7_rgnr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2537 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2538 uint32_t nrgs
= cpu
->pmsav7_dregion
;
2540 if (value
>= nrgs
) {
2541 qemu_log_mask(LOG_GUEST_ERROR
,
2542 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2543 " > %" PRIu32
"\n", (uint32_t)value
, nrgs
);
2547 raw_write(env
, ri
, value
);
2550 static const ARMCPRegInfo pmsav7_cp_reginfo
[] = {
2551 /* Reset for all these registers is handled in arm_cpu_reset(),
2552 * because the PMSAv7 is also used by M-profile CPUs, which do
2553 * not register cpregs but still need the state to be reset.
2555 { .name
= "DRBAR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 0,
2556 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
2557 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drbar
),
2558 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
2559 .resetfn
= arm_cp_reset_ignore
},
2560 { .name
= "DRSR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 2,
2561 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
2562 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drsr
),
2563 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
2564 .resetfn
= arm_cp_reset_ignore
},
2565 { .name
= "DRACR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 4,
2566 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
2567 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.dracr
),
2568 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
2569 .resetfn
= arm_cp_reset_ignore
},
2570 { .name
= "RGNR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 2, .opc2
= 0,
2572 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.rnr
[M_REG_NS
]),
2573 .writefn
= pmsav7_rgnr_write
,
2574 .resetfn
= arm_cp_reset_ignore
},
2578 static const ARMCPRegInfo pmsav5_cp_reginfo
[] = {
2579 { .name
= "DATA_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
2580 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
2581 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_data_ap
),
2582 .readfn
= pmsav5_data_ap_read
, .writefn
= pmsav5_data_ap_write
, },
2583 { .name
= "INSN_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
2584 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
2585 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_insn_ap
),
2586 .readfn
= pmsav5_insn_ap_read
, .writefn
= pmsav5_insn_ap_write
, },
2587 { .name
= "DATA_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 2,
2589 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_data_ap
),
2591 { .name
= "INSN_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 3,
2593 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_insn_ap
),
2595 { .name
= "DCACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
2597 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_data
), .resetvalue
= 0, },
2598 { .name
= "ICACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 1,
2600 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_insn
), .resetvalue
= 0, },
2601 /* Protection region base and size registers */
2602 { .name
= "946_PRBS0", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0,
2603 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2604 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[0]) },
2605 { .name
= "946_PRBS1", .cp
= 15, .crn
= 6, .crm
= 1, .opc1
= 0,
2606 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2607 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[1]) },
2608 { .name
= "946_PRBS2", .cp
= 15, .crn
= 6, .crm
= 2, .opc1
= 0,
2609 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2610 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[2]) },
2611 { .name
= "946_PRBS3", .cp
= 15, .crn
= 6, .crm
= 3, .opc1
= 0,
2612 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2613 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[3]) },
2614 { .name
= "946_PRBS4", .cp
= 15, .crn
= 6, .crm
= 4, .opc1
= 0,
2615 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2616 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[4]) },
2617 { .name
= "946_PRBS5", .cp
= 15, .crn
= 6, .crm
= 5, .opc1
= 0,
2618 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2619 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[5]) },
2620 { .name
= "946_PRBS6", .cp
= 15, .crn
= 6, .crm
= 6, .opc1
= 0,
2621 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2622 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[6]) },
2623 { .name
= "946_PRBS7", .cp
= 15, .crn
= 6, .crm
= 7, .opc1
= 0,
2624 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2625 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[7]) },
2629 static void vmsa_ttbcr_raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2632 TCR
*tcr
= raw_ptr(env
, ri
);
2633 int maskshift
= extract32(value
, 0, 3);
2635 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
2636 if (arm_feature(env
, ARM_FEATURE_LPAE
) && (value
& TTBCR_EAE
)) {
2637 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2638 * using Long-desciptor translation table format */
2639 value
&= ~((7 << 19) | (3 << 14) | (0xf << 3));
2640 } else if (arm_feature(env
, ARM_FEATURE_EL3
)) {
2641 /* In an implementation that includes the Security Extensions
2642 * TTBCR has additional fields PD0 [4] and PD1 [5] for
2643 * Short-descriptor translation table format.
2645 value
&= TTBCR_PD1
| TTBCR_PD0
| TTBCR_N
;
2651 /* Update the masks corresponding to the TCR bank being written
2652 * Note that we always calculate mask and base_mask, but
2653 * they are only used for short-descriptor tables (ie if EAE is 0);
2654 * for long-descriptor tables the TCR fields are used differently
2655 * and the mask and base_mask values are meaningless.
2657 tcr
->raw_tcr
= value
;
2658 tcr
->mask
= ~(((uint32_t)0xffffffffu
) >> maskshift
);
2659 tcr
->base_mask
= ~((uint32_t)0x3fffu
>> maskshift
);
2662 static void vmsa_ttbcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2665 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2667 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
2668 /* With LPAE the TTBCR could result in a change of ASID
2669 * via the TTBCR.A1 bit, so do a TLB flush.
2671 tlb_flush(CPU(cpu
));
2673 vmsa_ttbcr_raw_write(env
, ri
, value
);
2676 static void vmsa_ttbcr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2678 TCR
*tcr
= raw_ptr(env
, ri
);
2680 /* Reset both the TCR as well as the masks corresponding to the bank of
2681 * the TCR being reset.
2685 tcr
->base_mask
= 0xffffc000u
;
2688 static void vmsa_tcr_el1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2691 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2692 TCR
*tcr
= raw_ptr(env
, ri
);
2694 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2695 tlb_flush(CPU(cpu
));
2696 tcr
->raw_tcr
= value
;
2699 static void vmsa_ttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2702 /* 64 bit accesses to the TTBRs can change the ASID and so we
2703 * must flush the TLB.
2705 if (cpreg_field_is_64bit(ri
)) {
2706 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2708 tlb_flush(CPU(cpu
));
2710 raw_write(env
, ri
, value
);
2713 static void vttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2716 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2717 CPUState
*cs
= CPU(cpu
);
2719 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
2720 if (raw_read(env
, ri
) != value
) {
2721 tlb_flush_by_mmuidx(cs
,
2722 ARMMMUIdxBit_S12NSE1
|
2723 ARMMMUIdxBit_S12NSE0
|
2725 raw_write(env
, ri
, value
);
2729 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo
[] = {
2730 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
2731 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
2732 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dfsr_s
),
2733 offsetoflow32(CPUARMState
, cp15
.dfsr_ns
) }, },
2734 { .name
= "IFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
2735 .access
= PL1_RW
, .resetvalue
= 0,
2736 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.ifsr_s
),
2737 offsetoflow32(CPUARMState
, cp15
.ifsr_ns
) } },
2738 { .name
= "DFAR", .cp
= 15, .opc1
= 0, .crn
= 6, .crm
= 0, .opc2
= 0,
2739 .access
= PL1_RW
, .resetvalue
= 0,
2740 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.dfar_s
),
2741 offsetof(CPUARMState
, cp15
.dfar_ns
) } },
2742 { .name
= "FAR_EL1", .state
= ARM_CP_STATE_AA64
,
2743 .opc0
= 3, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 0,
2744 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.far_el
[1]),
2749 static const ARMCPRegInfo vmsa_cp_reginfo
[] = {
2750 { .name
= "ESR_EL1", .state
= ARM_CP_STATE_AA64
,
2751 .opc0
= 3, .crn
= 5, .crm
= 2, .opc1
= 0, .opc2
= 0,
2753 .fieldoffset
= offsetof(CPUARMState
, cp15
.esr_el
[1]), .resetvalue
= 0, },
2754 { .name
= "TTBR0_EL1", .state
= ARM_CP_STATE_BOTH
,
2755 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 0,
2756 .access
= PL1_RW
, .writefn
= vmsa_ttbr_write
, .resetvalue
= 0,
2757 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr0_s
),
2758 offsetof(CPUARMState
, cp15
.ttbr0_ns
) } },
2759 { .name
= "TTBR1_EL1", .state
= ARM_CP_STATE_BOTH
,
2760 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 1,
2761 .access
= PL1_RW
, .writefn
= vmsa_ttbr_write
, .resetvalue
= 0,
2762 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr1_s
),
2763 offsetof(CPUARMState
, cp15
.ttbr1_ns
) } },
2764 { .name
= "TCR_EL1", .state
= ARM_CP_STATE_AA64
,
2765 .opc0
= 3, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
2766 .access
= PL1_RW
, .writefn
= vmsa_tcr_el1_write
,
2767 .resetfn
= vmsa_ttbcr_reset
, .raw_writefn
= raw_write
,
2768 .fieldoffset
= offsetof(CPUARMState
, cp15
.tcr_el
[1]) },
2769 { .name
= "TTBCR", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
2770 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
, .writefn
= vmsa_ttbcr_write
,
2771 .raw_writefn
= vmsa_ttbcr_raw_write
,
2772 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tcr_el
[3]),
2773 offsetoflow32(CPUARMState
, cp15
.tcr_el
[1])} },
2777 static void omap_ticonfig_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2780 env
->cp15
.c15_ticonfig
= value
& 0xe7;
2781 /* The OS_TYPE bit in this register changes the reported CPUID! */
2782 env
->cp15
.c0_cpuid
= (value
& (1 << 5)) ?
2783 ARM_CPUID_TI915T
: ARM_CPUID_TI925T
;
2786 static void omap_threadid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2789 env
->cp15
.c15_threadid
= value
& 0xffff;
2792 static void omap_wfi_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2795 /* Wait-for-interrupt (deprecated) */
2796 cpu_interrupt(CPU(arm_env_get_cpu(env
)), CPU_INTERRUPT_HALT
);
2799 static void omap_cachemaint_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2802 /* On OMAP there are registers indicating the max/min index of dcache lines
2803 * containing a dirty line; cache flush operations have to reset these.
2805 env
->cp15
.c15_i_max
= 0x000;
2806 env
->cp15
.c15_i_min
= 0xff0;
2809 static const ARMCPRegInfo omap_cp_reginfo
[] = {
2810 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= CP_ANY
,
2811 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_OVERRIDE
,
2812 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.esr_el
[1]),
2814 { .name
= "", .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 0, .opc2
= 0,
2815 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
2816 { .name
= "TICONFIG", .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0,
2818 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_ticonfig
), .resetvalue
= 0,
2819 .writefn
= omap_ticonfig_write
},
2820 { .name
= "IMAX", .cp
= 15, .crn
= 15, .crm
= 2, .opc1
= 0, .opc2
= 0,
2822 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_max
), .resetvalue
= 0, },
2823 { .name
= "IMIN", .cp
= 15, .crn
= 15, .crm
= 3, .opc1
= 0, .opc2
= 0,
2824 .access
= PL1_RW
, .resetvalue
= 0xff0,
2825 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_min
) },
2826 { .name
= "THREADID", .cp
= 15, .crn
= 15, .crm
= 4, .opc1
= 0, .opc2
= 0,
2828 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_threadid
), .resetvalue
= 0,
2829 .writefn
= omap_threadid_write
},
2830 { .name
= "TI925T_STATUS", .cp
= 15, .crn
= 15,
2831 .crm
= 8, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
2832 .type
= ARM_CP_NO_RAW
,
2833 .readfn
= arm_cp_read_zero
, .writefn
= omap_wfi_write
, },
2834 /* TODO: Peripheral port remap register:
2835 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2836 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2839 { .name
= "OMAP_CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
2840 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
2841 .type
= ARM_CP_OVERRIDE
| ARM_CP_NO_RAW
,
2842 .writefn
= omap_cachemaint_write
},
2843 { .name
= "C9", .cp
= 15, .crn
= 9,
2844 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
,
2845 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
, .resetvalue
= 0 },
2849 static void xscale_cpar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2852 env
->cp15
.c15_cpar
= value
& 0x3fff;
2855 static const ARMCPRegInfo xscale_cp_reginfo
[] = {
2856 { .name
= "XSCALE_CPAR",
2857 .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
2858 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_cpar
), .resetvalue
= 0,
2859 .writefn
= xscale_cpar_write
, },
2860 { .name
= "XSCALE_AUXCR",
2861 .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 1, .access
= PL1_RW
,
2862 .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_xscaleauxcr
),
2864 /* XScale specific cache-lockdown: since we have no cache we NOP these
2865 * and hope the guest does not really rely on cache behaviour.
2867 { .name
= "XSCALE_LOCK_ICACHE_LINE",
2868 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 1, .opc2
= 0,
2869 .access
= PL1_W
, .type
= ARM_CP_NOP
},
2870 { .name
= "XSCALE_UNLOCK_ICACHE",
2871 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 1, .opc2
= 1,
2872 .access
= PL1_W
, .type
= ARM_CP_NOP
},
2873 { .name
= "XSCALE_DCACHE_LOCK",
2874 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 2, .opc2
= 0,
2875 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
2876 { .name
= "XSCALE_UNLOCK_DCACHE",
2877 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 2, .opc2
= 1,
2878 .access
= PL1_W
, .type
= ARM_CP_NOP
},
2882 static const ARMCPRegInfo dummy_c15_cp_reginfo
[] = {
2883 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2884 * implementation of this implementation-defined space.
2885 * Ideally this should eventually disappear in favour of actually
2886 * implementing the correct behaviour for all cores.
2888 { .name
= "C15_IMPDEF", .cp
= 15, .crn
= 15,
2889 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
2891 .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
| ARM_CP_OVERRIDE
,
2896 static const ARMCPRegInfo cache_dirty_status_cp_reginfo
[] = {
2897 /* Cache status: RAZ because we have no cache so it's always clean */
2898 { .name
= "CDSR", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 6,
2899 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2904 static const ARMCPRegInfo cache_block_ops_cp_reginfo
[] = {
2905 /* We never have a a block transfer operation in progress */
2906 { .name
= "BXSR", .cp
= 15, .crn
= 7, .crm
= 12, .opc1
= 0, .opc2
= 4,
2907 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2909 /* The cache ops themselves: these all NOP for QEMU */
2910 { .name
= "IICR", .cp
= 15, .crm
= 5, .opc1
= 0,
2911 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2912 { .name
= "IDCR", .cp
= 15, .crm
= 6, .opc1
= 0,
2913 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2914 { .name
= "CDCR", .cp
= 15, .crm
= 12, .opc1
= 0,
2915 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2916 { .name
= "PIR", .cp
= 15, .crm
= 12, .opc1
= 1,
2917 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2918 { .name
= "PDR", .cp
= 15, .crm
= 12, .opc1
= 2,
2919 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2920 { .name
= "CIDCR", .cp
= 15, .crm
= 14, .opc1
= 0,
2921 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2925 static const ARMCPRegInfo cache_test_clean_cp_reginfo
[] = {
2926 /* The cache test-and-clean instructions always return (1 << 30)
2927 * to indicate that there are no dirty cache lines.
2929 { .name
= "TC_DCACHE", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 3,
2930 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2931 .resetvalue
= (1 << 30) },
2932 { .name
= "TCI_DCACHE", .cp
= 15, .crn
= 7, .crm
= 14, .opc1
= 0, .opc2
= 3,
2933 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2934 .resetvalue
= (1 << 30) },
2938 static const ARMCPRegInfo strongarm_cp_reginfo
[] = {
2939 /* Ignore ReadBuffer accesses */
2940 { .name
= "C9_READBUFFER", .cp
= 15, .crn
= 9,
2941 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
2942 .access
= PL1_RW
, .resetvalue
= 0,
2943 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
| ARM_CP_NO_RAW
},
2947 static uint64_t midr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2949 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2950 unsigned int cur_el
= arm_current_el(env
);
2951 bool secure
= arm_is_secure(env
);
2953 if (arm_feature(&cpu
->env
, ARM_FEATURE_EL2
) && !secure
&& cur_el
== 1) {
2954 return env
->cp15
.vpidr_el2
;
2956 return raw_read(env
, ri
);
2959 static uint64_t mpidr_read_val(CPUARMState
*env
)
2961 ARMCPU
*cpu
= ARM_CPU(arm_env_get_cpu(env
));
2962 uint64_t mpidr
= cpu
->mp_affinity
;
2964 if (arm_feature(env
, ARM_FEATURE_V7MP
)) {
2965 mpidr
|= (1U << 31);
2966 /* Cores which are uniprocessor (non-coherent)
2967 * but still implement the MP extensions set
2968 * bit 30. (For instance, Cortex-R5).
2970 if (cpu
->mp_is_up
) {
2971 mpidr
|= (1u << 30);
2977 static uint64_t mpidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2979 unsigned int cur_el
= arm_current_el(env
);
2980 bool secure
= arm_is_secure(env
);
2982 if (arm_feature(env
, ARM_FEATURE_EL2
) && !secure
&& cur_el
== 1) {
2983 return env
->cp15
.vmpidr_el2
;
2985 return mpidr_read_val(env
);
2988 static const ARMCPRegInfo mpidr_cp_reginfo
[] = {
2989 { .name
= "MPIDR", .state
= ARM_CP_STATE_BOTH
,
2990 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 5,
2991 .access
= PL1_R
, .readfn
= mpidr_read
, .type
= ARM_CP_NO_RAW
},