1 #include "qemu/osdep.h"
5 #include "exec/gdbstub.h"
6 #include "exec/helper-proto.h"
7 #include "qemu/host-utils.h"
8 #include "sysemu/arch_init.h"
9 #include "sysemu/sysemu.h"
10 #include "qemu/bitops.h"
11 #include "qemu/crc32c.h"
12 #include "exec/exec-all.h"
13 #include "exec/cpu_ldst.h"
15 #include <zlib.h> /* For crc32 */
16 #include "exec/semihost.h"
17 #include "sysemu/kvm.h"
19 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
21 #ifndef CONFIG_USER_ONLY
22 static bool get_phys_addr(CPUARMState
*env
, target_ulong address
,
23 int access_type
, ARMMMUIdx mmu_idx
,
24 hwaddr
*phys_ptr
, MemTxAttrs
*attrs
, int *prot
,
25 target_ulong
*page_size
, uint32_t *fsr
,
28 static bool get_phys_addr_lpae(CPUARMState
*env
, target_ulong address
,
29 int access_type
, ARMMMUIdx mmu_idx
,
30 hwaddr
*phys_ptr
, MemTxAttrs
*txattrs
, int *prot
,
31 target_ulong
*page_size_ptr
, uint32_t *fsr
,
34 /* Definitions for the PMCCNTR and PMCR registers */
40 static int vfp_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
44 /* VFP data registers are always little-endian. */
45 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ?
32 : 16;
47 stfq_le_p(buf
, env
->vfp
.regs
[reg
]);
50 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
51 /* Aliases for Q regs. */
54 stfq_le_p(buf
, env
->vfp
.regs
[(reg
- 32) * 2]);
55 stfq_le_p(buf
+ 8, env
->vfp
.regs
[(reg
- 32) * 2 + 1]);
59 switch (reg
- nregs
) {
60 case 0: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSID
]); return 4;
61 case 1: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSCR
]); return 4;
62 case 2: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPEXC
]); return 4;
67 static int vfp_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
71 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ?
32 : 16;
73 env
->vfp
.regs
[reg
] = ldfq_le_p(buf
);
76 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
79 env
->vfp
.regs
[(reg
- 32) * 2] = ldfq_le_p(buf
);
80 env
->vfp
.regs
[(reg
- 32) * 2 + 1] = ldfq_le_p(buf
+ 8);
84 switch (reg
- nregs
) {
85 case 0: env
->vfp
.xregs
[ARM_VFP_FPSID
] = ldl_p(buf
); return 4;
86 case 1: env
->vfp
.xregs
[ARM_VFP_FPSCR
] = ldl_p(buf
); return 4;
87 case 2: env
->vfp
.xregs
[ARM_VFP_FPEXC
] = ldl_p(buf
) & (1 << 30); return 4;
92 static int aarch64_fpu_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
96 /* 128 bit FP register */
97 stfq_le_p(buf
, env
->vfp
.regs
[reg
* 2]);
98 stfq_le_p(buf
+ 8, env
->vfp
.regs
[reg
* 2 + 1]);
102 stl_p(buf
, vfp_get_fpsr(env
));
106 stl_p(buf
, vfp_get_fpcr(env
));
113 static int aarch64_fpu_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
117 /* 128 bit FP register */
118 env
->vfp
.regs
[reg
* 2] = ldfq_le_p(buf
);
119 env
->vfp
.regs
[reg
* 2 + 1] = ldfq_le_p(buf
+ 8);
123 vfp_set_fpsr(env
, ldl_p(buf
));
127 vfp_set_fpcr(env
, ldl_p(buf
));
134 static uint64_t raw_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
136 assert(ri
->fieldoffset
);
137 if (cpreg_field_is_64bit(ri
)) {
138 return CPREG_FIELD64(env
, ri
);
140 return CPREG_FIELD32(env
, ri
);
144 static void raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
147 assert(ri
->fieldoffset
);
148 if (cpreg_field_is_64bit(ri
)) {
149 CPREG_FIELD64(env
, ri
) = value
;
151 CPREG_FIELD32(env
, ri
) = value
;
155 static void *raw_ptr(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
157 return (char *)env
+ ri
->fieldoffset
;
160 uint64_t read_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
162 /* Raw read of a coprocessor register (as needed for migration, etc). */
163 if (ri
->type
& ARM_CP_CONST
) {
164 return ri
->resetvalue
;
165 } else if (ri
->raw_readfn
) {
166 return ri
->raw_readfn(env
, ri
);
167 } else if (ri
->readfn
) {
168 return ri
->readfn(env
, ri
);
170 return raw_read(env
, ri
);
174 static void write_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
177 /* Raw write of a coprocessor register (as needed for migration, etc).
178 * Note that constant registers are treated as write-ignored; the
179 * caller should check for success by whether a readback gives the
182 if (ri
->type
& ARM_CP_CONST
) {
184 } else if (ri
->raw_writefn
) {
185 ri
->raw_writefn(env
, ri
, v
);
186 } else if (ri
->writefn
) {
187 ri
->writefn(env
, ri
, v
);
189 raw_write(env
, ri
, v
);
193 static bool raw_accessors_invalid(const ARMCPRegInfo
*ri
)
195 /* Return true if the regdef would cause an assertion if you called
196 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
197 * program bug for it not to have the NO_RAW flag).
198 * NB that returning false here doesn't necessarily mean that calling
199 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
200 * read/write access functions which are safe for raw use" from "has
201 * read/write access functions which have side effects but has forgotten
202 * to provide raw access functions".
203 * The tests here line up with the conditions in read/write_raw_cp_reg()
204 * and assertions in raw_read()/raw_write().
206 if ((ri
->type
& ARM_CP_CONST
) ||
208 ((ri
->raw_writefn
|| ri
->writefn
) && (ri
->raw_readfn
|| ri
->readfn
))) {
214 bool write_cpustate_to_list(ARMCPU
*cpu
)
216 /* Write the coprocessor state from cpu->env to the (index,value) list. */
220 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
221 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
222 const ARMCPRegInfo
*ri
;
224 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
229 if (ri
->type
& ARM_CP_NO_RAW
) {
232 cpu
->cpreg_values
[i
] = read_raw_cp_reg(&cpu
->env
, ri
);
237 bool write_list_to_cpustate(ARMCPU
*cpu
)
242 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
243 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
244 uint64_t v
= cpu
->cpreg_values
[i
];
245 const ARMCPRegInfo
*ri
;
247 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
252 if (ri
->type
& ARM_CP_NO_RAW
) {
255 /* Write value and confirm it reads back as written
256 * (to catch read-only registers and partially read-only
257 * registers where the incoming migration value doesn't match)
259 write_raw_cp_reg(&cpu
->env
, ri
, v
);
260 if (read_raw_cp_reg(&cpu
->env
, ri
) != v
) {
267 static void add_cpreg_to_list(gpointer key
, gpointer opaque
)
269 ARMCPU
*cpu
= opaque
;
271 const ARMCPRegInfo
*ri
;
273 regidx
= *(uint32_t *)key
;
274 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
276 if (!(ri
->type
& (ARM_CP_NO_RAW
|ARM_CP_ALIAS
))) {
277 cpu
->cpreg_indexes
[cpu
->cpreg_array_len
] = cpreg_to_kvm_id(regidx
);
278 /* The value array need not be initialized at this point */
279 cpu
->cpreg_array_len
++;
283 static void count_cpreg(gpointer key
, gpointer opaque
)
285 ARMCPU
*cpu
= opaque
;
287 const ARMCPRegInfo
*ri
;
289 regidx
= *(uint32_t *)key
;
290 ri
= get_arm_cp_reginfo(cpu
->cp_regs
, regidx
);
292 if (!(ri
->type
& (ARM_CP_NO_RAW
|ARM_CP_ALIAS
))) {
293 cpu
->cpreg_array_len
++;
297 static gint
cpreg_key_compare(gconstpointer a
, gconstpointer b
)
299 uint64_t aidx
= cpreg_to_kvm_id(*(uint32_t *)a
);
300 uint64_t bidx
= cpreg_to_kvm_id(*(uint32_t *)b
);
311 void init_cpreg_list(ARMCPU
*cpu
)
313 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
314 * Note that we require cpreg_tuples[] to be sorted by key ID.
319 keys
= g_hash_table_get_keys(cpu
->cp_regs
);
320 keys
= g_list_sort(keys
, cpreg_key_compare
);
322 cpu
->cpreg_array_len
= 0;
324 g_list_foreach(keys
, count_cpreg
, cpu
);
326 arraylen
= cpu
->cpreg_array_len
;
327 cpu
->cpreg_indexes
= g_new(uint64_t, arraylen
);
328 cpu
->cpreg_values
= g_new(uint64_t, arraylen
);
329 cpu
->cpreg_vmstate_indexes
= g_new(uint64_t, arraylen
);
330 cpu
->cpreg_vmstate_values
= g_new(uint64_t, arraylen
);
331 cpu
->cpreg_vmstate_array_len
= cpu
->cpreg_array_len
;
332 cpu
->cpreg_array_len
= 0;
334 g_list_foreach(keys
, add_cpreg_to_list
, cpu
);
336 assert(cpu
->cpreg_array_len
== arraylen
);
342 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
343 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
345 * access_el3_aa32ns: Used to check AArch32 register views.
346 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
348 static CPAccessResult
access_el3_aa32ns(CPUARMState
*env
,
349 const ARMCPRegInfo
*ri
,
352 bool secure
= arm_is_secure_below_el3(env
);
354 assert(!arm_el_is_aa64(env
, 3));
356 return CP_ACCESS_TRAP_UNCATEGORIZED
;
361 static CPAccessResult
access_el3_aa32ns_aa64any(CPUARMState
*env
,
362 const ARMCPRegInfo
*ri
,
365 if (!arm_el_is_aa64(env
, 3)) {
366 return access_el3_aa32ns(env
, ri
, isread
);
371 /* Some secure-only AArch32 registers trap to EL3 if used from
372 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
373 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
374 * We assume that the .access field is set to PL1_RW.
376 static CPAccessResult
access_trap_aa32s_el1(CPUARMState
*env
,
377 const ARMCPRegInfo
*ri
,
380 if (arm_current_el(env
) == 3) {
383 if (arm_is_secure_below_el3(env
)) {
384 return CP_ACCESS_TRAP_EL3
;
386 /* This will be EL1 NS and EL2 NS, which just UNDEF */
387 return CP_ACCESS_TRAP_UNCATEGORIZED
;
390 /* Check for traps to "powerdown debug" registers, which are controlled
393 static CPAccessResult
access_tdosa(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
396 int el
= arm_current_el(env
);
398 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDOSA
)
399 && !arm_is_secure_below_el3(env
)) {
400 return CP_ACCESS_TRAP_EL2
;
402 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDOSA
)) {
403 return CP_ACCESS_TRAP_EL3
;
408 /* Check for traps to "debug ROM" registers, which are controlled
409 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
411 static CPAccessResult
access_tdra(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
414 int el
= arm_current_el(env
);
416 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDRA
)
417 && !arm_is_secure_below_el3(env
)) {
418 return CP_ACCESS_TRAP_EL2
;
420 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
421 return CP_ACCESS_TRAP_EL3
;
426 /* Check for traps to general debug registers, which are controlled
427 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
429 static CPAccessResult
access_tda(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
432 int el
= arm_current_el(env
);
434 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDA
)
435 && !arm_is_secure_below_el3(env
)) {
436 return CP_ACCESS_TRAP_EL2
;
438 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
439 return CP_ACCESS_TRAP_EL3
;
444 /* Check for traps to performance monitor registers, which are controlled
445 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
447 static CPAccessResult
access_tpm(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
450 int el
= arm_current_el(env
);
452 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
453 && !arm_is_secure_below_el3(env
)) {
454 return CP_ACCESS_TRAP_EL2
;
456 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
457 return CP_ACCESS_TRAP_EL3
;
462 static void dacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
464 ARMCPU
*cpu
= arm_env_get_cpu(env
);
466 raw_write(env
, ri
, value
);
467 tlb_flush(CPU(cpu
)); /* Flush TLB as domain not tracked in TLB */
470 static void fcse_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
472 ARMCPU
*cpu
= arm_env_get_cpu(env
);
474 if (raw_read(env
, ri
) != value
) {
475 /* Unlike real hardware the qemu TLB uses virtual addresses,
476 * not modified virtual addresses, so this causes a TLB flush.
479 raw_write(env
, ri
, value
);
483 static void contextidr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
486 ARMCPU
*cpu
= arm_env_get_cpu(env
);
488 if (raw_read(env
, ri
) != value
&& !arm_feature(env
, ARM_FEATURE_MPU
)
489 && !extended_addresses_enabled(env
)) {
490 /* For VMSA (when not using the LPAE long descriptor page table
491 * format) this register includes the ASID, so do a TLB flush.
492 * For PMSA it is purely a process ID and no action is needed.
496 raw_write(env
, ri
, value
);
499 static void tlbiall_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
502 /* Invalidate all (TLBIALL) */
503 ARMCPU
*cpu
= arm_env_get_cpu(env
);
508 static void tlbimva_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
511 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
512 ARMCPU
*cpu
= arm_env_get_cpu(env
);
514 tlb_flush_page(CPU(cpu
), value
& TARGET_PAGE_MASK
);
517 static void tlbiasid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
520 /* Invalidate by ASID (TLBIASID) */
521 ARMCPU
*cpu
= arm_env_get_cpu(env
);
526 static void tlbimvaa_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
529 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
530 ARMCPU
*cpu
= arm_env_get_cpu(env
);
532 tlb_flush_page(CPU(cpu
), value
& TARGET_PAGE_MASK
);
535 /* IS variants of TLB operations must affect all cores */
536 static void tlbiall_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
539 CPUState
*cs
= ENV_GET_CPU(env
);
541 tlb_flush_all_cpus_synced(cs
);
544 static void tlbiasid_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
547 CPUState
*cs
= ENV_GET_CPU(env
);
549 tlb_flush_all_cpus_synced(cs
);
552 static void tlbimva_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
555 CPUState
*cs
= ENV_GET_CPU(env
);
557 tlb_flush_page_all_cpus_synced(cs
, value
& TARGET_PAGE_MASK
);
560 static void tlbimvaa_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
563 CPUState
*cs
= ENV_GET_CPU(env
);
565 tlb_flush_page_all_cpus_synced(cs
, value
& TARGET_PAGE_MASK
);
568 static void tlbiall_nsnh_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
571 CPUState
*cs
= ENV_GET_CPU(env
);
573 tlb_flush_by_mmuidx(cs
,
574 (1 << ARMMMUIdx_S12NSE1
) |
575 (1 << ARMMMUIdx_S12NSE0
) |
576 (1 << ARMMMUIdx_S2NS
));
579 static void tlbiall_nsnh_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
582 CPUState
*cs
= ENV_GET_CPU(env
);
584 tlb_flush_by_mmuidx_all_cpus_synced(cs
,
585 (1 << ARMMMUIdx_S12NSE1
) |
586 (1 << ARMMMUIdx_S12NSE0
) |
587 (1 << ARMMMUIdx_S2NS
));
590 static void tlbiipas2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
593 /* Invalidate by IPA. This has to invalidate any structures that
594 * contain only stage 2 translation information, but does not need
595 * to apply to structures that contain combined stage 1 and stage 2
596 * translation information.
597 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
599 CPUState
*cs
= ENV_GET_CPU(env
);
602 if (!arm_feature(env
, ARM_FEATURE_EL2
) || !(env
->cp15
.scr_el3
& SCR_NS
)) {
606 pageaddr
= sextract64(value
<< 12, 0, 40);
608 tlb_flush_page_by_mmuidx(cs
, pageaddr
, (1 << ARMMMUIdx_S2NS
));
611 static void tlbiipas2_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
614 CPUState
*cs
= ENV_GET_CPU(env
);
617 if (!arm_feature(env
, ARM_FEATURE_EL2
) || !(env
->cp15
.scr_el3
& SCR_NS
)) {
621 pageaddr
= sextract64(value
<< 12, 0, 40);
623 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
624 (1 << ARMMMUIdx_S2NS
));
627 static void tlbiall_hyp_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
630 CPUState
*cs
= ENV_GET_CPU(env
);
632 tlb_flush_by_mmuidx(cs
, (1 << ARMMMUIdx_S1E2
));
635 static void tlbiall_hyp_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
638 CPUState
*cs
= ENV_GET_CPU(env
);
640 tlb_flush_by_mmuidx_all_cpus_synced(cs
, (1 << ARMMMUIdx_S1E2
));
643 static void tlbimva_hyp_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
646 CPUState
*cs
= ENV_GET_CPU(env
);
647 uint64_t pageaddr
= value
& ~MAKE_64BIT_MASK(0, 12);
649 tlb_flush_page_by_mmuidx(cs
, pageaddr
, (1 << ARMMMUIdx_S1E2
));
652 static void tlbimva_hyp_is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
655 CPUState
*cs
= ENV_GET_CPU(env
);
656 uint64_t pageaddr
= value
& ~MAKE_64BIT_MASK(0, 12);
658 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
659 (1 << ARMMMUIdx_S1E2
));
662 static const ARMCPRegInfo cp_reginfo
[] = {
663 /* Define the secure and non-secure FCSE identifier CP registers
664 * separately because there is no secure bank in V8 (no _EL3). This allows
665 * the secure register to be properly reset and migrated. There is also no
666 * v8 EL1 version of the register so the non-secure instance stands alone.
668 { .name
= "FCSEIDR(NS)",
669 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
670 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
671 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_ns
),
672 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
673 { .name
= "FCSEIDR(S)",
674 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 0,
675 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
676 .fieldoffset
= offsetof(CPUARMState
, cp15
.fcseidr_s
),
677 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
678 /* Define the secure and non-secure context identifier CP registers
679 * separately because there is no secure bank in V8 (no _EL3). This allows
680 * the secure register to be properly reset and migrated. In the
681 * non-secure case, the 32-bit register will have reset and migration
682 * disabled during registration as it is handled by the 64-bit instance.
684 { .name
= "CONTEXTIDR_EL1", .state
= ARM_CP_STATE_BOTH
,
685 .opc0
= 3, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
686 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_NS
,
687 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_el
[1]),
688 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
689 { .name
= "CONTEXTIDR(S)", .state
= ARM_CP_STATE_AA32
,
690 .cp
= 15, .opc1
= 0, .crn
= 13, .crm
= 0, .opc2
= 1,
691 .access
= PL1_RW
, .secure
= ARM_CP_SECSTATE_S
,
692 .fieldoffset
= offsetof(CPUARMState
, cp15
.contextidr_s
),
693 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
697 static const ARMCPRegInfo not_v8_cp_reginfo
[] = {
698 /* NB: Some of these registers exist in v8 but with more precise
699 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
701 /* MMU Domain access control / MPU write buffer control */
703 .cp
= 15, .opc1
= CP_ANY
, .crn
= 3, .crm
= CP_ANY
, .opc2
= CP_ANY
,
704 .access
= PL1_RW
, .resetvalue
= 0,
705 .writefn
= dacr_write
, .raw_writefn
= raw_write
,
706 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dacr_s
),
707 offsetoflow32(CPUARMState
, cp15
.dacr_ns
) } },
708 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
709 * For v6 and v5, these mappings are overly broad.
711 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 0,
712 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
713 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 1,
714 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
715 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 4,
716 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
717 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= 8,
718 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
719 /* Cache maintenance ops; some of this space may be overridden later. */
720 { .name
= "CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
721 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
722 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
},
726 static const ARMCPRegInfo not_v6_cp_reginfo
[] = {
727 /* Not all pre-v6 cores implemented this WFI, so this is slightly
730 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
731 .access
= PL1_W
, .type
= ARM_CP_WFI
},
735 static const ARMCPRegInfo not_v7_cp_reginfo
[] = {
736 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
737 * is UNPREDICTABLE; we choose to NOP as most implementations do).
739 { .name
= "WFI_v6", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
740 .access
= PL1_W
, .type
= ARM_CP_WFI
},
741 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
742 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
743 * OMAPCP will override this space.
745 { .name
= "DLOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 0,
746 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_data
),
748 { .name
= "ILOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 1,
749 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_insn
),
751 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
752 { .name
= "DUMMY", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= CP_ANY
,
753 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
755 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
756 * implementing it as RAZ means the "debug architecture version" bits
757 * will read as a reserved value, which should cause Linux to not try
758 * to use the debug hardware.
760 { .name
= "DBGDIDR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 0,
761 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
762 /* MMU TLB control. Note that the wildcarding means we cover not just
763 * the unified TLB ops but also the dside/iside/inner-shareable variants.
765 { .name
= "TLBIALL", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
766 .opc1
= CP_ANY
, .opc2
= 0, .access
= PL1_W
, .writefn
= tlbiall_write
,
767 .type
= ARM_CP_NO_RAW
},
768 { .name
= "TLBIMVA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
769 .opc1
= CP_ANY
, .opc2
= 1, .access
= PL1_W
, .writefn
= tlbimva_write
,
770 .type
= ARM_CP_NO_RAW
},
771 { .name
= "TLBIASID", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
772 .opc1
= CP_ANY
, .opc2
= 2, .access
= PL1_W
, .writefn
= tlbiasid_write
,
773 .type
= ARM_CP_NO_RAW
},
774 { .name
= "TLBIMVAA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
775 .opc1
= CP_ANY
, .opc2
= 3, .access
= PL1_W
, .writefn
= tlbimvaa_write
,
776 .type
= ARM_CP_NO_RAW
},
777 { .name
= "PRRR", .cp
= 15, .crn
= 10, .crm
= 2,
778 .opc1
= 0, .opc2
= 0, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
779 { .name
= "NMRR", .cp
= 15, .crn
= 10, .crm
= 2,
780 .opc1
= 0, .opc2
= 1, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
784 static void cpacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
789 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
790 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
791 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
792 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
793 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
795 if (arm_feature(env
, ARM_FEATURE_VFP
)) {
796 /* VFP coprocessor: cp10 & cp11 [23:20] */
797 mask
|= (1 << 31) | (1 << 30) | (0xf << 20);
799 if (!arm_feature(env
, ARM_FEATURE_NEON
)) {
800 /* ASEDIS [31] bit is RAO/WI */
804 /* VFPv3 and upwards with NEON implement 32 double precision
805 * registers (D0-D31).
807 if (!arm_feature(env
, ARM_FEATURE_NEON
) ||
808 !arm_feature(env
, ARM_FEATURE_VFP3
)) {
809 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
815 env
->cp15
.cpacr_el1
= value
;
818 static CPAccessResult
cpacr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
821 if (arm_feature(env
, ARM_FEATURE_V8
)) {
822 /* Check if CPACR accesses are to be trapped to EL2 */
823 if (arm_current_el(env
) == 1 &&
824 (env
->cp15
.cptr_el
[2] & CPTR_TCPAC
) && !arm_is_secure(env
)) {
825 return CP_ACCESS_TRAP_EL2
;
826 /* Check if CPACR accesses are to be trapped to EL3 */
827 } else if (arm_current_el(env
) < 3 &&
828 (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
829 return CP_ACCESS_TRAP_EL3
;
836 static CPAccessResult
cptr_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
839 /* Check if CPTR accesses are set to trap to EL3 */
840 if (arm_current_el(env
) == 2 && (env
->cp15
.cptr_el
[3] & CPTR_TCPAC
)) {
841 return CP_ACCESS_TRAP_EL3
;
847 static const ARMCPRegInfo v6_cp_reginfo
[] = {
848 /* prefetch by MVA in v6, NOP in v7 */
849 { .name
= "MVA_prefetch",
850 .cp
= 15, .crn
= 7, .crm
= 13, .opc1
= 0, .opc2
= 1,
851 .access
= PL1_W
, .type
= ARM_CP_NOP
},
852 /* We need to break the TB after ISB to execute self-modifying code
853 * correctly and also to take any pending interrupts immediately.
854 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
856 { .name
= "ISB", .cp
= 15, .crn
= 7, .crm
= 5, .opc1
= 0, .opc2
= 4,
857 .access
= PL0_W
, .type
= ARM_CP_NO_RAW
, .writefn
= arm_cp_write_ignore
},
858 { .name
= "DSB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 4,
859 .access
= PL0_W
, .type
= ARM_CP_NOP
},
860 { .name
= "DMB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 5,
861 .access
= PL0_W
, .type
= ARM_CP_NOP
},
862 { .name
= "IFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 2,
864 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ifar_s
),
865 offsetof(CPUARMState
, cp15
.ifar_ns
) },
867 /* Watchpoint Fault Address Register : should actually only be present
868 * for 1136, 1176, 11MPCore.
870 { .name
= "WFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 1,
871 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0, },
872 { .name
= "CPACR", .state
= ARM_CP_STATE_BOTH
, .opc0
= 3,
873 .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 2, .accessfn
= cpacr_access
,
874 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.cpacr_el1
),
875 .resetvalue
= 0, .writefn
= cpacr_write
},
879 static CPAccessResult
pmreg_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
882 /* Performance monitor registers user accessibility is controlled
883 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
884 * trapping to EL2 or EL3 for other accesses.
886 int el
= arm_current_el(env
);
888 if (el
== 0 && !env
->cp15
.c9_pmuserenr
) {
889 return CP_ACCESS_TRAP
;
891 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TPM
)
892 && !arm_is_secure_below_el3(env
)) {
893 return CP_ACCESS_TRAP_EL2
;
895 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TPM
)) {
896 return CP_ACCESS_TRAP_EL3
;
902 #ifndef CONFIG_USER_ONLY
904 static inline bool arm_ccnt_enabled(CPUARMState
*env
)
906 /* This does not support checking PMCCFILTR_EL0 register */
908 if (!(env
->cp15
.c9_pmcr
& PMCRE
)) {
915 void pmccntr_sync(CPUARMState
*env
)
919 temp_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
920 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
922 if (env
->cp15
.c9_pmcr
& PMCRD
) {
923 /* Increment once every 64 processor clock cycles */
927 if (arm_ccnt_enabled(env
)) {
928 env
->cp15
.c15_ccnt
= temp_ticks
- env
->cp15
.c15_ccnt
;
932 static void pmcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
938 /* The counter has been reset */
939 env
->cp15
.c15_ccnt
= 0;
942 /* only the DP, X, D and E bits are writable */
943 env
->cp15
.c9_pmcr
&= ~0x39;
944 env
->cp15
.c9_pmcr
|= (value
& 0x39);
949 static uint64_t pmccntr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
951 uint64_t total_ticks
;
953 if (!arm_ccnt_enabled(env
)) {
954 /* Counter is disabled, do not change value */
955 return env
->cp15
.c15_ccnt
;
958 total_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
959 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
961 if (env
->cp15
.c9_pmcr
& PMCRD
) {
962 /* Increment once every 64 processor clock cycles */
965 return total_ticks
- env
->cp15
.c15_ccnt
;
968 static void pmselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
971 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
972 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
973 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
976 env
->cp15
.c9_pmselr
= value
& 0x1f;
979 static void pmccntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
982 uint64_t total_ticks
;
984 if (!arm_ccnt_enabled(env
)) {
985 /* Counter is disabled, set the absolute value */
986 env
->cp15
.c15_ccnt
= value
;
990 total_ticks
= muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
),
991 ARM_CPU_FREQ
, NANOSECONDS_PER_SECOND
);
993 if (env
->cp15
.c9_pmcr
& PMCRD
) {
994 /* Increment once every 64 processor clock cycles */
997 env
->cp15
.c15_ccnt
= total_ticks
- value
;
1000 static void pmccntr_write32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1003 uint64_t cur_val
= pmccntr_read(env
, NULL
);
1005 pmccntr_write(env
, ri
, deposit64(cur_val
, 0, 32, value
));
1008 #else /* CONFIG_USER_ONLY */
1010 void pmccntr_sync(CPUARMState
*env
)
1016 static void pmccfiltr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1020 env
->cp15
.pmccfiltr_el0
= value
& 0x7E000000;
1024 static void pmcntenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1028 env
->cp15
.c9_pmcnten
|= value
;
1031 static void pmcntenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1035 env
->cp15
.c9_pmcnten
&= ~value
;
1038 static void pmovsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1041 env
->cp15
.c9_pmovsr
&= ~value
;
1044 static void pmxevtyper_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1047 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1048 * PMSELR value is equal to or greater than the number of implemented
1049 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1051 if (env
->cp15
.c9_pmselr
== 0x1f) {
1052 pmccfiltr_write(env
, ri
, value
);
1056 static uint64_t pmxevtyper_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1058 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1059 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1061 if (env
->cp15
.c9_pmselr
== 0x1f) {
1062 return env
->cp15
.pmccfiltr_el0
;
1068 static void pmuserenr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1071 env
->cp15
.c9_pmuserenr
= value
& 1;
1074 static void pmintenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1077 /* We have no event counters so only the C bit can be changed */
1079 env
->cp15
.c9_pminten
|= value
;
1082 static void pmintenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1086 env
->cp15
.c9_pminten
&= ~value
;
1089 static void vbar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1092 /* Note that even though the AArch64 view of this register has bits
1093 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1094 * architectural requirements for bits which are RES0 only in some
1095 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1096 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1098 raw_write(env
, ri
, value
& ~0x1FULL
);
1101 static void scr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
1103 /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1104 * For bits that vary between AArch32/64, code needs to check the
1105 * current execution mode before directly using the feature bit.
1107 uint32_t valid_mask
= SCR_AARCH64_MASK
| SCR_AARCH32_MASK
;
1109 if (!arm_feature(env
, ARM_FEATURE_EL2
)) {
1110 valid_mask
&= ~SCR_HCE
;
1112 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1113 * supported if EL2 exists. The bit is UNK/SBZP when
1114 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1115 * when EL2 is unavailable.
1116 * On ARMv8, this bit is always available.
1118 if (arm_feature(env
, ARM_FEATURE_V7
) &&
1119 !arm_feature(env
, ARM_FEATURE_V8
)) {
1120 valid_mask
&= ~SCR_SMD
;
1124 /* Clear all-context RES0 bits. */
1125 value
&= valid_mask
;
1126 raw_write(env
, ri
, value
);
1129 static uint64_t ccsidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1131 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1133 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1136 uint32_t index
= A32_BANKED_REG_GET(env
, csselr
,
1137 ri
->secure
& ARM_CP_SECSTATE_S
);
1139 return cpu
->ccsidr
[index
];
1142 static void csselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1145 raw_write(env
, ri
, value
& 0xf);
1148 static uint64_t isr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1150 CPUState
*cs
= ENV_GET_CPU(env
);
1153 if (cs
->interrupt_request
& CPU_INTERRUPT_HARD
) {
1156 if (cs
->interrupt_request
& CPU_INTERRUPT_FIQ
) {
1159 /* External aborts are not possible in QEMU so A bit is always clear */
1163 static const ARMCPRegInfo v7_cp_reginfo
[] = {
1164 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1165 { .name
= "NOP", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
1166 .access
= PL1_W
, .type
= ARM_CP_NOP
},
1167 /* Performance monitors are implementation defined in v7,
1168 * but with an ARM recommended set of registers, which we
1169 * follow (although we don't actually implement any counters)
1171 * Performance registers fall into three categories:
1172 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1173 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1174 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1175 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1176 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1178 { .name
= "PMCNTENSET", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 1,
1179 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1180 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1181 .writefn
= pmcntenset_write
,
1182 .accessfn
= pmreg_access
,
1183 .raw_writefn
= raw_write
},
1184 { .name
= "PMCNTENSET_EL0", .state
= ARM_CP_STATE_AA64
,
1185 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 1,
1186 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1187 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
), .resetvalue
= 0,
1188 .writefn
= pmcntenset_write
, .raw_writefn
= raw_write
},
1189 { .name
= "PMCNTENCLR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 2,
1191 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1192 .accessfn
= pmreg_access
,
1193 .writefn
= pmcntenclr_write
,
1194 .type
= ARM_CP_ALIAS
},
1195 { .name
= "PMCNTENCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1196 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 2,
1197 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1198 .type
= ARM_CP_ALIAS
,
1199 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
),
1200 .writefn
= pmcntenclr_write
},
1201 { .name
= "PMOVSR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 3,
1202 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmovsr
),
1203 .accessfn
= pmreg_access
,
1204 .writefn
= pmovsr_write
,
1205 .raw_writefn
= raw_write
},
1206 { .name
= "PMOVSCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1207 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 3,
1208 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1209 .type
= ARM_CP_ALIAS
,
1210 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmovsr
),
1211 .writefn
= pmovsr_write
,
1212 .raw_writefn
= raw_write
},
1213 /* Unimplemented so WI. */
1214 { .name
= "PMSWINC", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 4,
1215 .access
= PL0_W
, .accessfn
= pmreg_access
, .type
= ARM_CP_NOP
},
1216 #ifndef CONFIG_USER_ONLY
1217 { .name
= "PMSELR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 5,
1218 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1219 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmselr
),
1220 .accessfn
= pmreg_access
, .writefn
= pmselr_write
,
1221 .raw_writefn
= raw_write
},
1222 { .name
= "PMSELR_EL0", .state
= ARM_CP_STATE_AA64
,
1223 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 5,
1224 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1225 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmselr
),
1226 .writefn
= pmselr_write
, .raw_writefn
= raw_write
, },
1227 { .name
= "PMCCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 0,
1228 .access
= PL0_RW
, .resetvalue
= 0, .type
= ARM_CP_IO
,
1229 .readfn
= pmccntr_read
, .writefn
= pmccntr_write32
,
1230 .accessfn
= pmreg_access
},
1231 { .name
= "PMCCNTR_EL0", .state
= ARM_CP_STATE_AA64
,
1232 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 0,
1233 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1235 .readfn
= pmccntr_read
, .writefn
= pmccntr_write
, },
1237 { .name
= "PMCCFILTR_EL0", .state
= ARM_CP_STATE_AA64
,
1238 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 15, .opc2
= 7,
1239 .writefn
= pmccfiltr_write
,
1240 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1242 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmccfiltr_el0
),
1244 { .name
= "PMXEVTYPER", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 1,
1245 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
, .accessfn
= pmreg_access
,
1246 .writefn
= pmxevtyper_write
, .readfn
= pmxevtyper_read
},
1247 { .name
= "PMXEVTYPER_EL0", .state
= ARM_CP_STATE_AA64
,
1248 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 1,
1249 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
, .accessfn
= pmreg_access
,
1250 .writefn
= pmxevtyper_write
, .readfn
= pmxevtyper_read
},
1251 /* Unimplemented, RAZ/WI. */
1252 { .name
= "PMXEVCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 2,
1253 .access
= PL0_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0,
1254 .accessfn
= pmreg_access
},
1255 { .name
= "PMUSERENR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 0,
1256 .access
= PL0_R
| PL1_RW
, .accessfn
= access_tpm
,
1257 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmuserenr
),
1259 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
1260 { .name
= "PMUSERENR_EL0", .state
= ARM_CP_STATE_AA64
,
1261 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 14, .opc2
= 0,
1262 .access
= PL0_R
| PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1263 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmuserenr
),
1265 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
1266 { .name
= "PMINTENSET", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 1,
1267 .access
= PL1_RW
, .accessfn
= access_tpm
,
1268 .type
= ARM_CP_ALIAS
,
1269 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pminten
),
1271 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
},
1272 { .name
= "PMINTENSET_EL1", .state
= ARM_CP_STATE_AA64
,
1273 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 1,
1274 .access
= PL1_RW
, .accessfn
= access_tpm
,
1276 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1277 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
,
1278 .resetvalue
= 0x0 },
1279 { .name
= "PMINTENCLR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 2,
1280 .access
= PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1281 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1282 .writefn
= pmintenclr_write
, },
1283 { .name
= "PMINTENCLR_EL1", .state
= ARM_CP_STATE_AA64
,
1284 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 2,
1285 .access
= PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1286 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1287 .writefn
= pmintenclr_write
},
1288 { .name
= "CCSIDR", .state
= ARM_CP_STATE_BOTH
,
1289 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 0,
1290 .access
= PL1_R
, .readfn
= ccsidr_read
, .type
= ARM_CP_NO_RAW
},
1291 { .name
= "CSSELR", .state
= ARM_CP_STATE_BOTH
,
1292 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 2, .opc2
= 0,
1293 .access
= PL1_RW
, .writefn
= csselr_write
, .resetvalue
= 0,
1294 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.csselr_s
),
1295 offsetof(CPUARMState
, cp15
.csselr_ns
) } },
1296 /* Auxiliary ID register: this actually has an IMPDEF value but for now
1297 * just RAZ for all cores:
1299 { .name
= "AIDR", .state
= ARM_CP_STATE_BOTH
,
1300 .opc0
= 3, .opc1
= 1, .crn
= 0, .crm
= 0, .opc2
= 7,
1301 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1302 /* Auxiliary fault status registers: these also are IMPDEF, and we
1303 * choose to RAZ/WI for all cores.
1305 { .name
= "AFSR0_EL1", .state
= ARM_CP_STATE_BOTH
,
1306 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 0,
1307 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1308 { .name
= "AFSR1_EL1", .state
= ARM_CP_STATE_BOTH
,
1309 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 1,
1310 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1311 /* MAIR can just read-as-written because we don't implement caches
1312 * and so don't need to care about memory attributes.
1314 { .name
= "MAIR_EL1", .state
= ARM_CP_STATE_AA64
,
1315 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
1316 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[1]),
1318 { .name
= "MAIR_EL3", .state
= ARM_CP_STATE_AA64
,
1319 .opc0
= 3, .opc1
= 6, .crn
= 10, .crm
= 2, .opc2
= 0,
1320 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[3]),
1322 /* For non-long-descriptor page tables these are PRRR and NMRR;
1323 * regardless they still act as reads-as-written for QEMU.
1325 /* MAIR0/1 are defined separately from their 64-bit counterpart which
1326 * allows them to assign the correct fieldoffset based on the endianness
1327 * handled in the field definitions.
1329 { .name
= "MAIR0", .state
= ARM_CP_STATE_AA32
,
1330 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0, .access
= PL1_RW
,
1331 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair0_s
),
1332 offsetof(CPUARMState
, cp15
.mair0_ns
) },
1333 .resetfn
= arm_cp_reset_ignore
},
1334 { .name
= "MAIR1", .state
= ARM_CP_STATE_AA32
,
1335 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 1, .access
= PL1_RW
,
1336 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair1_s
),
1337 offsetof(CPUARMState
, cp15
.mair1_ns
) },
1338 .resetfn
= arm_cp_reset_ignore
},
1339 { .name
= "ISR_EL1", .state
= ARM_CP_STATE_BOTH
,
1340 .opc0
= 3, .opc1
= 0, .crn
= 12, .crm
= 1, .opc2
= 0,
1341 .type
= ARM_CP_NO_RAW
, .access
= PL1_R
, .readfn
= isr_read
},
1342 /* 32 bit ITLB invalidates */
1343 { .name
= "ITLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 0,
1344 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
1345 { .name
= "ITLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 1,
1346 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
1347 { .name
= "ITLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 2,
1348 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
1349 /* 32 bit DTLB invalidates */
1350 { .name
= "DTLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 0,
1351 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
1352 { .name
= "DTLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 1,
1353 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
1354 { .name
= "DTLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 2,
1355 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
1356 /* 32 bit TLB invalidates */
1357 { .name
= "TLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
1358 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_write
},
1359 { .name
= "TLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
1360 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_write
},
1361 { .name
= "TLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
1362 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiasid_write
},
1363 { .name
= "TLBIMVAA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
1364 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimvaa_write
},
1368 static const ARMCPRegInfo v7mp_cp_reginfo
[] = {
1369 /* 32 bit TLB invalidates, Inner Shareable */
1370 { .name
= "TLBIALLIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
1371 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbiall_is_write
},
1372 { .name
= "TLBIMVAIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
1373 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .writefn
= tlbimva_is_write
},
1374 { .name
= "TLBIASIDIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
1375 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
,
1376 .writefn
= tlbiasid_is_write
},
1377 { .name
= "TLBIMVAAIS", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
1378 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
,
1379 .writefn
= tlbimvaa_is_write
},
1383 static void teecr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* TEEHBR traps to EL1 for EL0 accesses when TEECR bit 0 is set. */
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
1399 static const ARMCPRegInfo t2ee_cp_reginfo
[] = {
1400 { .name
= "TEECR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 6, .opc2
= 0,
1401 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, teecr
),
1403 .writefn
= teecr_write
},
1404 { .name
= "TEEHBR", .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 6, .opc2
= 0,
1405 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, teehbr
),
1406 .accessfn
= teehbr_access
, .resetvalue
= 0 },
1410 static const ARMCPRegInfo v6k_cp_reginfo
[] = {
1411 { .name
= "TPIDR_EL0", .state
= ARM_CP_STATE_AA64
,
1412 .opc0
= 3, .opc1
= 3, .opc2
= 2, .crn
= 13, .crm
= 0,
1414 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidr_el
[0]), .resetvalue
= 0 },
1415 { .name
= "TPIDRURW", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 2,
1417 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tpidrurw_s
),
1418 offsetoflow32(CPUARMState
, cp15
.tpidrurw_ns
) },
1419 .resetfn
= arm_cp_reset_ignore
},
1420 { .name
= "TPIDRRO_EL0", .state
= ARM_CP_STATE_AA64
,
1421 .opc0
= 3, .opc1
= 3, .opc2
= 3, .crn
= 13, .crm
= 0,
1422 .access
= PL0_R
|PL1_W
,
1423 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidrro_el
[0]),
1425 { .name
= "TPIDRURO", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 3,
1426 .access
= PL0_R
|PL1_W
,
1427 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tpidruro_s
),
1428 offsetoflow32(CPUARMState
, cp15
.tpidruro_ns
) },
1429 .resetfn
= arm_cp_reset_ignore
},
1430 { .name
= "TPIDR_EL1", .state
= ARM_CP_STATE_AA64
,
1431 .opc0
= 3, .opc1
= 0, .opc2
= 4, .crn
= 13, .crm
= 0,
1433 .fieldoffset
= offsetof(CPUARMState
, cp15
.tpidr_el
[1]), .resetvalue
= 0 },
1434 { .name
= "TPIDRPRW", .opc1
= 0, .cp
= 15, .crn
= 13, .crm
= 0, .opc2
= 4,
1436 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tpidrprw_s
),
1437 offsetoflow32(CPUARMState
, cp15
.tpidrprw_ns
) },
1442 #ifndef CONFIG_USER_ONLY
1444 static CPAccessResult
gt_cntfrq_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1447 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1448 * Writable only at the highest implemented exception level.
1450 int el
= arm_current_el(env
);
1454 if (!extract32(env
->cp15
.c14_cntkctl
, 0, 2)) {
1455 return CP_ACCESS_TRAP
;
1459 if (!isread
&& ri
->state
== ARM_CP_STATE_AA32
&&
1460 arm_is_secure_below_el3(env
)) {
1461 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1462 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1470 if (!isread
&& el
< arm_highest_el(env
)) {
1471 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1474 return CP_ACCESS_OK
;
1477 static CPAccessResult
gt_counter_access(CPUARMState
*env
, int timeridx
,
1480 unsigned int cur_el
= arm_current_el(env
);
1481 bool secure
= arm_is_secure(env
);
1483 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1485 !extract32(env
->cp15
.c14_cntkctl
, timeridx
, 1)) {
1486 return CP_ACCESS_TRAP
;
1489 if (arm_feature(env
, ARM_FEATURE_EL2
) &&
1490 timeridx
== GTIMER_PHYS
&& !secure
&& cur_el
< 2 &&
1491 !extract32(env
->cp15
.cnthctl_el2
, 0, 1)) {
1492 return CP_ACCESS_TRAP_EL2
;
1494 return CP_ACCESS_OK
;
1497 static CPAccessResult
gt_timer_access(CPUARMState
*env
, int timeridx
,
1500 unsigned int cur_el
= arm_current_el(env
);
1501 bool secure
= arm_is_secure(env
);
1503 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1504 * EL0[PV]TEN is zero.
1507 !extract32(env
->cp15
.c14_cntkctl
, 9 - timeridx
, 1)) {
1508 return CP_ACCESS_TRAP
;
1511 if (arm_feature(env
, ARM_FEATURE_EL2
) &&
1512 timeridx
== GTIMER_PHYS
&& !secure
&& cur_el
< 2 &&
1513 !extract32(env
->cp15
.cnthctl_el2
, 1, 1)) {
1514 return CP_ACCESS_TRAP_EL2
;
1516 return CP_ACCESS_OK
;
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* Access check for the physical counter (CNTPCT). */
    return gt_counter_access(env, GTIMER_PHYS, isread);
}
static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* Access check for the virtual counter (CNTVCT). */
    return gt_counter_access(env, GTIMER_VIRT, isread);
}
static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* Access check for the physical timer registers. */
    return gt_timer_access(env, GTIMER_PHYS, isread);
}
static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* Access check for the virtual timer registers. */
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
1545 static CPAccessResult
gt_stimer_access(CPUARMState
*env
,
1546 const ARMCPRegInfo
*ri
,
1549 /* The AArch64 register view of the secure physical timer is
1550 * always accessible from EL3, and configurably accessible from
1553 switch (arm_current_el(env
)) {
1555 if (!arm_is_secure(env
)) {
1556 return CP_ACCESS_TRAP
;
1558 if (!(env
->cp15
.scr_el3
& SCR_ST
)) {
1559 return CP_ACCESS_TRAP_EL3
;
1561 return CP_ACCESS_OK
;
1564 return CP_ACCESS_TRAP
;
1566 return CP_ACCESS_OK
;
1568 g_assert_not_reached();
1572 static uint64_t gt_get_countervalue(CPUARMState
*env
)
1574 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) / GTIMER_SCALE
;
1577 static void gt_recalc_timer(ARMCPU
*cpu
, int timeridx
)
1579 ARMGenericTimer
*gt
= &cpu
->env
.cp15
.c14_timer
[timeridx
];
1582 /* Timer enabled: calculate and set current ISTATUS, irq, and
1583 * reset timer to when ISTATUS next has to change
1585 uint64_t offset
= timeridx
== GTIMER_VIRT ?
1586 cpu
->env
.cp15
.cntvoff_el2
: 0;
1587 uint64_t count
= gt_get_countervalue(&cpu
->env
);
1588 /* Note that this must be unsigned 64 bit arithmetic: */
1589 int istatus
= count
- offset
>= gt
->cval
;
1593 gt
->ctl
= deposit32(gt
->ctl
, 2, 1, istatus
);
1595 irqstate
= (istatus
&& !(gt
->ctl
& 2));
1596 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
1599 /* Next transition is when count rolls back over to zero */
1600 nexttick
= UINT64_MAX
;
1602 /* Next transition is when we hit cval */
1603 nexttick
= gt
->cval
+ offset
;
1605 /* Note that the desired next expiry time might be beyond the
1606 * signed-64-bit range of a QEMUTimer -- in this case we just
1607 * set the timer for as far in the future as possible. When the
1608 * timer expires we will reset the timer for any remaining period.
1610 if (nexttick
> INT64_MAX
/ GTIMER_SCALE
) {
1611 nexttick
= INT64_MAX
/ GTIMER_SCALE
;
1613 timer_mod(cpu
->gt_timer
[timeridx
], nexttick
);
1614 trace_arm_gt_recalc(timeridx
, irqstate
, nexttick
);
1616 /* Timer disabled: ISTATUS and timer output always clear */
1618 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], 0);
1619 timer_del(cpu
->gt_timer
[timeridx
]);
1620 trace_arm_gt_recalc_disabled(timeridx
);
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Cancel any pending expiry for this timer on CPU reset. */
    timer_del(cpu->gt_timer[timeridx]);
}
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTPCT: the physical count, with no offset applied. */
    return gt_get_countervalue(env);
}
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTVCT: the physical count minus the virtual offset CNTVOFF_EL2. */
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    /* Store the new compare value and recompute the timer deadline. */
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    /* TVAL is a 32-bit downcounting view: cval minus the (possibly
     * offset) current count, truncated to 32 bits.
     */
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    /* Writing TVAL sets cval = now + sign-extended 32-bit value, then
     * the deadline is recomputed.
     */
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    /* Write handler for CNT*_CTL: only bits [1:0] (ENABLE, IMASK) are
     * writable; bit 2 (ISTATUS) is managed by gt_recalc_timer().
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
/* Per-timer wrappers binding the shared generic-timer helpers to the
 * physical timer (GTIMER_PHYS), for use as ARMCPRegInfo callbacks.
 */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
/* Per-timer wrappers binding the shared generic-timer helpers to the
 * virtual timer (GTIMER_VIRT), for use as ARMCPRegInfo callbacks.
 */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Changing CNTVOFF_EL2 moves the virtual counter, so the virtual
     * timer's deadline must be recomputed.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
/* Per-timer wrappers binding the shared generic-timer helpers to the
 * hypervisor timer (GTIMER_HYP), for use as ARMCPRegInfo callbacks.
 */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
/* Per-timer wrappers binding the shared generic-timer helpers to the
 * secure physical timer (GTIMER_SEC), for use as ARMCPRegInfo callbacks.
 */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1817 void arm_gt_ptimer_cb(void *opaque
)
1819 ARMCPU
*cpu
= opaque
;
1821 gt_recalc_timer(cpu
, GTIMER_PHYS
);
1824 void arm_gt_vtimer_cb(void *opaque
)
1826 ARMCPU
*cpu
= opaque
;
1828 gt_recalc_timer(cpu
, GTIMER_VIRT
);
1831 void arm_gt_htimer_cb(void *opaque
)
1833 ARMCPU
*cpu
= opaque
;
1835 gt_recalc_timer(cpu
, GTIMER_HYP
);
1838 void arm_gt_stimer_cb(void *opaque
)
1840 ARMCPU
*cpu
= opaque
;
1842 gt_recalc_timer(cpu
, GTIMER_SEC
);
1845 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
1846 /* Note that CNTFRQ is purely reads-as-written for the benefit
1847 * of software; writing it doesn't actually change the timer frequency.
1848 * Our reset value matches the fixed frequency we implement the timer at.
1850 { .name
= "CNTFRQ", .cp
= 15, .crn
= 14, .crm
= 0, .opc1
= 0, .opc2
= 0,
1851 .type
= ARM_CP_ALIAS
,
1852 .access
= PL1_RW
| PL0_R
, .accessfn
= gt_cntfrq_access
,
1853 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c14_cntfrq
),
1855 { .name
= "CNTFRQ_EL0", .state
= ARM_CP_STATE_AA64
,
1856 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 0,
1857 .access
= PL1_RW
| PL0_R
, .accessfn
= gt_cntfrq_access
,
1858 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntfrq
),
1859 .resetvalue
= (1000 * 1000 * 1000) / GTIMER_SCALE
,
1861 /* overall control: mostly access permissions */
1862 { .name
= "CNTKCTL", .state
= ARM_CP_STATE_BOTH
,
1863 .opc0
= 3, .opc1
= 0, .crn
= 14, .crm
= 1, .opc2
= 0,
1865 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_cntkctl
),
1868 /* per-timer control */
1869 { .name
= "CNTP_CTL", .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 1,
1870 .secure
= ARM_CP_SECSTATE_NS
,
1871 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL1_RW
| PL0_R
,
1872 .accessfn
= gt_ptimer_access
,
1873 .fieldoffset
= offsetoflow32(CPUARMState
,
1874 cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
1875 .writefn
= gt_phys_ctl_write
, .raw_writefn
= raw_write
,
1877 { .name
= "CNTP_CTL(S)",
1878 .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 1,
1879 .secure
= ARM_CP_SECSTATE_S
,
1880 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL1_RW
| PL0_R
,
1881 .accessfn
= gt_ptimer_access
,
1882 .fieldoffset
= offsetoflow32(CPUARMState
,
1883 cp15
.c14_timer
[GTIMER_SEC
].ctl
),
1884 .writefn
= gt_sec_ctl_write
, .raw_writefn
= raw_write
,
1886 { .name
= "CNTP_CTL_EL0", .state
= ARM_CP_STATE_AA64
,
1887 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 1,
1888 .type
= ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1889 .accessfn
= gt_ptimer_access
,
1890 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].ctl
),
1892 .writefn
= gt_phys_ctl_write
, .raw_writefn
= raw_write
,
1894 { .name
= "CNTV_CTL", .cp
= 15, .crn
= 14, .crm
= 3, .opc1
= 0, .opc2
= 1,
1895 .type
= ARM_CP_IO
| ARM_CP_ALIAS
, .access
= PL1_RW
| PL0_R
,
1896 .accessfn
= gt_vtimer_access
,
1897 .fieldoffset
= offsetoflow32(CPUARMState
,
1898 cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
1899 .writefn
= gt_virt_ctl_write
, .raw_writefn
= raw_write
,
1901 { .name
= "CNTV_CTL_EL0", .state
= ARM_CP_STATE_AA64
,
1902 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 1,
1903 .type
= ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1904 .accessfn
= gt_vtimer_access
,
1905 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].ctl
),
1907 .writefn
= gt_virt_ctl_write
, .raw_writefn
= raw_write
,
1909 /* TimerValue views: a 32 bit downcounting view of the underlying state */
1910 { .name
= "CNTP_TVAL", .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 0,
1911 .secure
= ARM_CP_SECSTATE_NS
,
1912 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1913 .accessfn
= gt_ptimer_access
,
1914 .readfn
= gt_phys_tval_read
, .writefn
= gt_phys_tval_write
,
1916 { .name
= "CNTP_TVAL(S)",
1917 .cp
= 15, .crn
= 14, .crm
= 2, .opc1
= 0, .opc2
= 0,
1918 .secure
= ARM_CP_SECSTATE_S
,
1919 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1920 .accessfn
= gt_ptimer_access
,
1921 .readfn
= gt_sec_tval_read
, .writefn
= gt_sec_tval_write
,
1923 { .name
= "CNTP_TVAL_EL0", .state
= ARM_CP_STATE_AA64
,
1924 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 0,
1925 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1926 .accessfn
= gt_ptimer_access
, .resetfn
= gt_phys_timer_reset
,
1927 .readfn
= gt_phys_tval_read
, .writefn
= gt_phys_tval_write
,
1929 { .name
= "CNTV_TVAL", .cp
= 15, .crn
= 14, .crm
= 3, .opc1
= 0, .opc2
= 0,
1930 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1931 .accessfn
= gt_vtimer_access
,
1932 .readfn
= gt_virt_tval_read
, .writefn
= gt_virt_tval_write
,
1934 { .name
= "CNTV_TVAL_EL0", .state
= ARM_CP_STATE_AA64
,
1935 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 0,
1936 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
| PL0_R
,
1937 .accessfn
= gt_vtimer_access
, .resetfn
= gt_virt_timer_reset
,
1938 .readfn
= gt_virt_tval_read
, .writefn
= gt_virt_tval_write
,
1940 /* The counter itself */
1941 { .name
= "CNTPCT", .cp
= 15, .crm
= 14, .opc1
= 0,
1942 .access
= PL0_R
, .type
= ARM_CP_64BIT
| ARM_CP_NO_RAW
| ARM_CP_IO
,
1943 .accessfn
= gt_pct_access
,
1944 .readfn
= gt_cnt_read
, .resetfn
= arm_cp_reset_ignore
,
1946 { .name
= "CNTPCT_EL0", .state
= ARM_CP_STATE_AA64
,
1947 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 1,
1948 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1949 .accessfn
= gt_pct_access
, .readfn
= gt_cnt_read
,
1951 { .name
= "CNTVCT", .cp
= 15, .crm
= 14, .opc1
= 1,
1952 .access
= PL0_R
, .type
= ARM_CP_64BIT
| ARM_CP_NO_RAW
| ARM_CP_IO
,
1953 .accessfn
= gt_vct_access
,
1954 .readfn
= gt_virt_cnt_read
, .resetfn
= arm_cp_reset_ignore
,
1956 { .name
= "CNTVCT_EL0", .state
= ARM_CP_STATE_AA64
,
1957 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 0, .opc2
= 2,
1958 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1959 .accessfn
= gt_vct_access
, .readfn
= gt_virt_cnt_read
,
1961 /* Comparison value, indicating when the timer goes off */
1962 { .name
= "CNTP_CVAL", .cp
= 15, .crm
= 14, .opc1
= 2,
1963 .secure
= ARM_CP_SECSTATE_NS
,
1964 .access
= PL1_RW
| PL0_R
,
1965 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
1966 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
1967 .accessfn
= gt_ptimer_access
,
1968 .writefn
= gt_phys_cval_write
, .raw_writefn
= raw_write
,
1970 { .name
= "CNTP_CVAL(S)", .cp
= 15, .crm
= 14, .opc1
= 2,
1971 .secure
= ARM_CP_SECSTATE_S
,
1972 .access
= PL1_RW
| PL0_R
,
1973 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
1974 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].cval
),
1975 .accessfn
= gt_ptimer_access
,
1976 .writefn
= gt_sec_cval_write
, .raw_writefn
= raw_write
,
1978 { .name
= "CNTP_CVAL_EL0", .state
= ARM_CP_STATE_AA64
,
1979 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 2, .opc2
= 2,
1980 .access
= PL1_RW
| PL0_R
,
1982 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_PHYS
].cval
),
1983 .resetvalue
= 0, .accessfn
= gt_ptimer_access
,
1984 .writefn
= gt_phys_cval_write
, .raw_writefn
= raw_write
,
1986 { .name
= "CNTV_CVAL", .cp
= 15, .crm
= 14, .opc1
= 3,
1987 .access
= PL1_RW
| PL0_R
,
1988 .type
= ARM_CP_64BIT
| ARM_CP_IO
| ARM_CP_ALIAS
,
1989 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
1990 .accessfn
= gt_vtimer_access
,
1991 .writefn
= gt_virt_cval_write
, .raw_writefn
= raw_write
,
1993 { .name
= "CNTV_CVAL_EL0", .state
= ARM_CP_STATE_AA64
,
1994 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 3, .opc2
= 2,
1995 .access
= PL1_RW
| PL0_R
,
1997 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_VIRT
].cval
),
1998 .resetvalue
= 0, .accessfn
= gt_vtimer_access
,
1999 .writefn
= gt_virt_cval_write
, .raw_writefn
= raw_write
,
2001 /* Secure timer -- this is actually restricted to only EL3
2002 * and configurably Secure-EL1 via the accessfn.
2004 { .name
= "CNTPS_TVAL_EL1", .state
= ARM_CP_STATE_AA64
,
2005 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 0,
2006 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
, .access
= PL1_RW
,
2007 .accessfn
= gt_stimer_access
,
2008 .readfn
= gt_sec_tval_read
,
2009 .writefn
= gt_sec_tval_write
,
2010 .resetfn
= gt_sec_timer_reset
,
2012 { .name
= "CNTPS_CTL_EL1", .state
= ARM_CP_STATE_AA64
,
2013 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 1,
2014 .type
= ARM_CP_IO
, .access
= PL1_RW
,
2015 .accessfn
= gt_stimer_access
,
2016 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].ctl
),
2018 .writefn
= gt_sec_ctl_write
, .raw_writefn
= raw_write
,
2020 { .name
= "CNTPS_CVAL_EL1", .state
= ARM_CP_STATE_AA64
,
2021 .opc0
= 3, .opc1
= 7, .crn
= 14, .crm
= 2, .opc2
= 2,
2022 .type
= ARM_CP_IO
, .access
= PL1_RW
,
2023 .accessfn
= gt_stimer_access
,
2024 .fieldoffset
= offsetof(CPUARMState
, cp15
.c14_timer
[GTIMER_SEC
].cval
),
2025 .writefn
= gt_sec_cval_write
, .raw_writefn
= raw_write
,
2031 /* In user-mode none of the generic timer registers are accessible,
2032 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2033 * so instead just don't register any of them.
2035 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
2041 static void par_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
2043 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
2044 raw_write(env
, ri
, value
);
2045 } else if (arm_feature(env
, ARM_FEATURE_V7
)) {
2046 raw_write(env
, ri
, value
& 0xfffff6ff);
2048 raw_write(env
, ri
, value
& 0xfffff1ff);
2052 #ifndef CONFIG_USER_ONLY
2053 /* get_phys_addr() isn't present for user-mode-only targets */
2055 static CPAccessResult
ats_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2059 /* The ATS12NSO* operations must trap to EL3 if executed in
2060 * Secure EL1 (which can only happen if EL3 is AArch64).
2061 * They are simply UNDEF if executed from NS EL1.
2062 * They function normally from EL2 or EL3.
2064 if (arm_current_el(env
) == 1) {
2065 if (arm_is_secure_below_el3(env
)) {
2066 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3
;
2068 return CP_ACCESS_TRAP_UNCATEGORIZED
;
2071 return CP_ACCESS_OK
;
2074 static uint64_t do_ats_write(CPUARMState
*env
, uint64_t value
,
2075 int access_type
, ARMMMUIdx mmu_idx
)
2078 target_ulong page_size
;
2083 MemTxAttrs attrs
= {};
2084 ARMMMUFaultInfo fi
= {};
2086 ret
= get_phys_addr(env
, value
, access_type
, mmu_idx
,
2087 &phys_addr
, &attrs
, &prot
, &page_size
, &fsr
, &fi
);
2088 if (extended_addresses_enabled(env
)) {
2089 /* fsr is a DFSR/IFSR value for the long descriptor
2090 * translation table format, but with WnR always clear.
2091 * Convert it to a 64-bit PAR.
2093 par64
= (1 << 11); /* LPAE bit always set */
2095 par64
|= phys_addr
& ~0xfffULL
;
2096 if (!attrs
.secure
) {
2097 par64
|= (1 << 9); /* NS */
2099 /* We don't set the ATTR or SH fields in the PAR. */
2102 par64
|= (fsr
& 0x3f) << 1; /* FS */
2103 /* Note that S2WLK and FSTAGE are always zero, because we don't
2104 * implement virtualization and therefore there can't be a stage 2
2109 /* fsr is a DFSR/IFSR value for the short descriptor
2110 * translation table format (with WnR always clear).
2111 * Convert it to a 32-bit PAR.
2114 /* We do not set any attribute bits in the PAR */
2115 if (page_size
== (1 << 24)
2116 && arm_feature(env
, ARM_FEATURE_V7
)) {
2117 par64
= (phys_addr
& 0xff000000) | (1 << 1);
2119 par64
= phys_addr
& 0xfffff000;
2121 if (!attrs
.secure
) {
2122 par64
|= (1 << 9); /* NS */
2125 par64
= ((fsr
& (1 << 10)) >> 5) | ((fsr
& (1 << 12)) >> 6) |
2126 ((fsr
& 0xf) << 1) | 1;
2132 static void ats_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
2134 int access_type
= ri
->opc2
& 1;
2137 int el
= arm_current_el(env
);
2138 bool secure
= arm_is_secure_below_el3(env
);
2140 switch (ri
->opc2
& 6) {
2142 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2145 mmu_idx
= ARMMMUIdx_S1E3
;
2148 mmu_idx
= ARMMMUIdx_S1NSE1
;
2151 mmu_idx
= secure ? ARMMMUIdx_S1SE1
: ARMMMUIdx_S1NSE1
;
2154 g_assert_not_reached();
2158 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2161 mmu_idx
= ARMMMUIdx_S1SE0
;
2164 mmu_idx
= ARMMMUIdx_S1NSE0
;
2167 mmu_idx
= secure ? ARMMMUIdx_S1SE0
: ARMMMUIdx_S1NSE0
;
2170 g_assert_not_reached();
2174 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2175 mmu_idx
= ARMMMUIdx_S12NSE1
;
2178 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2179 mmu_idx
= ARMMMUIdx_S12NSE0
;
2182 g_assert_not_reached();
2185 par64
= do_ats_write(env
, value
, access_type
, mmu_idx
);
2187 A32_BANKED_CURRENT_REG_SET(env
, par
, par64
);
2190 static void ats1h_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2193 int access_type
= ri
->opc2
& 1;
2196 par64
= do_ats_write(env
, value
, access_type
, ARMMMUIdx_S2NS
);
2198 A32_BANKED_CURRENT_REG_SET(env
, par
, par64
);
2201 static CPAccessResult
at_s1e2_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2204 if (arm_current_el(env
) == 3 && !(env
->cp15
.scr_el3
& SCR_NS
)) {
2205 return CP_ACCESS_TRAP
;
2207 return CP_ACCESS_OK
;
2210 static void ats_write64(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2213 int access_type
= ri
->opc2
& 1;
2215 int secure
= arm_is_secure_below_el3(env
);
2217 switch (ri
->opc2
& 6) {
2220 case 0: /* AT S1E1R, AT S1E1W */
2221 mmu_idx
= secure ? ARMMMUIdx_S1SE1
: ARMMMUIdx_S1NSE1
;
2223 case 4: /* AT S1E2R, AT S1E2W */
2224 mmu_idx
= ARMMMUIdx_S1E2
;
2226 case 6: /* AT S1E3R, AT S1E3W */
2227 mmu_idx
= ARMMMUIdx_S1E3
;
2230 g_assert_not_reached();
2233 case 2: /* AT S1E0R, AT S1E0W */
2234 mmu_idx
= secure ? ARMMMUIdx_S1SE0
: ARMMMUIdx_S1NSE0
;
2236 case 4: /* AT S12E1R, AT S12E1W */
2237 mmu_idx
= secure ? ARMMMUIdx_S1SE1
: ARMMMUIdx_S12NSE1
;
2239 case 6: /* AT S12E0R, AT S12E0W */
2240 mmu_idx
= secure ? ARMMMUIdx_S1SE0
: ARMMMUIdx_S12NSE0
;
2243 g_assert_not_reached();
2246 env
->cp15
.par_el
[1] = do_ats_write(env
, value
, access_type
, mmu_idx
);
2250 static const ARMCPRegInfo vapa_cp_reginfo
[] = {
2251 { .name
= "PAR", .cp
= 15, .crn
= 7, .crm
= 4, .opc1
= 0, .opc2
= 0,
2252 .access
= PL1_RW
, .resetvalue
= 0,
2253 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.par_s
),
2254 offsetoflow32(CPUARMState
, cp15
.par_ns
) },
2255 .writefn
= par_write
},
2256 #ifndef CONFIG_USER_ONLY
2257 /* This underdecoding is safe because the reginfo is NO_RAW. */
2258 { .name
= "ATS", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= CP_ANY
,
2259 .access
= PL1_W
, .accessfn
= ats_access
,
2260 .writefn
= ats_write
, .type
= ARM_CP_NO_RAW
},
/* Return basic MPU access permission bits.
 * Compresses 8 regions' 4-bit extended AP fields (low 2 bits significant)
 * into 8 consecutive 2-bit fields.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}
/* Pad basic MPU access permission bits to extended format.
 * Inverse of simple_mpu_ap_bits(): expands 8 2-bit fields into 8 4-bit
 * fields (high 2 bits of each zero).
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
2295 static void pmsav5_data_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2298 env
->cp15
.pmsav5_data_ap
= extended_mpu_ap_bits(value
);
2301 static uint64_t pmsav5_data_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2303 return simple_mpu_ap_bits(env
->cp15
.pmsav5_data_ap
);
2306 static void pmsav5_insn_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2309 env
->cp15
.pmsav5_insn_ap
= extended_mpu_ap_bits(value
);
2312 static uint64_t pmsav5_insn_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2314 return simple_mpu_ap_bits(env
->cp15
.pmsav5_insn_ap
);
2317 static uint64_t pmsav7_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2319 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
2325 u32p
+= env
->cp15
.c6_rgnr
;
2329 static void pmsav7_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2332 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2333 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
2339 u32p
+= env
->cp15
.c6_rgnr
;
2340 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
2344 static void pmsav7_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2346 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2347 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
2353 memset(u32p
, 0, sizeof(*u32p
) * cpu
->pmsav7_dregion
);
2356 static void pmsav7_rgnr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2359 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2360 uint32_t nrgs
= cpu
->pmsav7_dregion
;
2362 if (value
>= nrgs
) {
2363 qemu_log_mask(LOG_GUEST_ERROR
,
2364 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2365 " > %" PRIu32
"\n", (uint32_t)value
, nrgs
);
2369 raw_write(env
, ri
, value
);
2372 static const ARMCPRegInfo pmsav7_cp_reginfo
[] = {
2373 { .name
= "DRBAR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 0,
2374 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
2375 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drbar
),
2376 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
, .resetfn
= pmsav7_reset
},
2377 { .name
= "DRSR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 2,
2378 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
2379 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drsr
),
2380 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
, .resetfn
= pmsav7_reset
},
2381 { .name
= "DRACR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 4,
2382 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
2383 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.dracr
),
2384 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
, .resetfn
= pmsav7_reset
},
2385 { .name
= "RGNR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 2, .opc2
= 0,
2387 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_rgnr
),
2388 .writefn
= pmsav7_rgnr_write
},
2392 static const ARMCPRegInfo pmsav5_cp_reginfo
[] = {
2393 { .name
= "DATA_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
2394 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
2395 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_data_ap
),
2396 .readfn
= pmsav5_data_ap_read
, .writefn
= pmsav5_data_ap_write
, },
2397 { .name
= "INSN_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
2398 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
2399 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_insn_ap
),
2400 .readfn
= pmsav5_insn_ap_read
, .writefn
= pmsav5_insn_ap_write
, },
2401 { .name
= "DATA_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 2,
2403 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_data_ap
),
2405 { .name
= "INSN_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 3,
2407 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_insn_ap
),
2409 { .name
= "DCACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
2411 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_data
), .resetvalue
= 0, },
2412 { .name
= "ICACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 1,
2414 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_insn
), .resetvalue
= 0, },
2415 /* Protection region base and size registers */
2416 { .name
= "946_PRBS0", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0,
2417 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2418 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[0]) },
2419 { .name
= "946_PRBS1", .cp
= 15, .crn
= 6, .crm
= 1, .opc1
= 0,
2420 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2421 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[1]) },
2422 { .name
= "946_PRBS2", .cp
= 15, .crn
= 6, .crm
= 2, .opc1
= 0,
2423 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2424 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[2]) },
2425 { .name
= "946_PRBS3", .cp
= 15, .crn
= 6, .crm
= 3, .opc1
= 0,
2426 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2427 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[3]) },
2428 { .name
= "946_PRBS4", .cp
= 15, .crn
= 6, .crm
= 4, .opc1
= 0,
2429 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2430 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[4]) },
2431 { .name
= "946_PRBS5", .cp
= 15, .crn
= 6, .crm
= 5, .opc1
= 0,
2432 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2433 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[5]) },
2434 { .name
= "946_PRBS6", .cp
= 15, .crn
= 6, .crm
= 6, .opc1
= 0,
2435 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2436 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[6]) },
2437 { .name
= "946_PRBS7", .cp
= 15, .crn
= 6, .crm
= 7, .opc1
= 0,
2438 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
2439 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[7]) },
2443 static void vmsa_ttbcr_raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2446 TCR
*tcr
= raw_ptr(env
, ri
);
2447 int maskshift
= extract32(value
, 0, 3);
2449 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
2450 if (arm_feature(env
, ARM_FEATURE_LPAE
) && (value
& TTBCR_EAE
)) {
2451 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2452 * using Long-desciptor translation table format */
2453 value
&= ~((7 << 19) | (3 << 14) | (0xf << 3));
2454 } else if (arm_feature(env
, ARM_FEATURE_EL3
)) {
2455 /* In an implementation that includes the Security Extensions
2456 * TTBCR has additional fields PD0 [4] and PD1 [5] for
2457 * Short-descriptor translation table format.
2459 value
&= TTBCR_PD1
| TTBCR_PD0
| TTBCR_N
;
2465 /* Update the masks corresponding to the TCR bank being written
2466 * Note that we always calculate mask and base_mask, but
2467 * they are only used for short-descriptor tables (ie if EAE is 0);
2468 * for long-descriptor tables the TCR fields are used differently
2469 * and the mask and base_mask values are meaningless.
2471 tcr
->raw_tcr
= value
;
2472 tcr
->mask
= ~(((uint32_t)0xffffffffu
) >> maskshift
);
2473 tcr
->base_mask
= ~((uint32_t)0x3fffu
>> maskshift
);
2476 static void vmsa_ttbcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2479 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2481 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
2482 /* With LPAE the TTBCR could result in a change of ASID
2483 * via the TTBCR.A1 bit, so do a TLB flush.
2485 tlb_flush(CPU(cpu
));
2487 vmsa_ttbcr_raw_write(env
, ri
, value
);
2490 static void vmsa_ttbcr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2492 TCR
*tcr
= raw_ptr(env
, ri
);
2494 /* Reset both the TCR as well as the masks corresponding to the bank of
2495 * the TCR being reset.
2499 tcr
->base_mask
= 0xffffc000u
;
2502 static void vmsa_tcr_el1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2505 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2506 TCR
*tcr
= raw_ptr(env
, ri
);
2508 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2509 tlb_flush(CPU(cpu
));
2510 tcr
->raw_tcr
= value
;
2513 static void vmsa_ttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2516 /* 64 bit accesses to the TTBRs can change the ASID and so we
2517 * must flush the TLB.
2519 if (cpreg_field_is_64bit(ri
)) {
2520 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2522 tlb_flush(CPU(cpu
));
2524 raw_write(env
, ri
, value
);
2527 static void vttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2530 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2531 CPUState
*cs
= CPU(cpu
);
2533 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
2534 if (raw_read(env
, ri
) != value
) {
2535 tlb_flush_by_mmuidx(cs
,
2536 (1 << ARMMMUIdx_S12NSE1
) |
2537 (1 << ARMMMUIdx_S12NSE0
) |
2538 (1 << ARMMMUIdx_S2NS
));
2539 raw_write(env
, ri
, value
);
2543 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo
[] = {
2544 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
2545 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
2546 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dfsr_s
),
2547 offsetoflow32(CPUARMState
, cp15
.dfsr_ns
) }, },
2548 { .name
= "IFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
2549 .access
= PL1_RW
, .resetvalue
= 0,
2550 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.ifsr_s
),
2551 offsetoflow32(CPUARMState
, cp15
.ifsr_ns
) } },
2552 { .name
= "DFAR", .cp
= 15, .opc1
= 0, .crn
= 6, .crm
= 0, .opc2
= 0,
2553 .access
= PL1_RW
, .resetvalue
= 0,
2554 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.dfar_s
),
2555 offsetof(CPUARMState
, cp15
.dfar_ns
) } },
2556 { .name
= "FAR_EL1", .state
= ARM_CP_STATE_AA64
,
2557 .opc0
= 3, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 0,
2558 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.far_el
[1]),
2563 static const ARMCPRegInfo vmsa_cp_reginfo
[] = {
2564 { .name
= "ESR_EL1", .state
= ARM_CP_STATE_AA64
,
2565 .opc0
= 3, .crn
= 5, .crm
= 2, .opc1
= 0, .opc2
= 0,
2567 .fieldoffset
= offsetof(CPUARMState
, cp15
.esr_el
[1]), .resetvalue
= 0, },
2568 { .name
= "TTBR0_EL1", .state
= ARM_CP_STATE_BOTH
,
2569 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 0,
2570 .access
= PL1_RW
, .writefn
= vmsa_ttbr_write
, .resetvalue
= 0,
2571 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr0_s
),
2572 offsetof(CPUARMState
, cp15
.ttbr0_ns
) } },
2573 { .name
= "TTBR1_EL1", .state
= ARM_CP_STATE_BOTH
,
2574 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 1,
2575 .access
= PL1_RW
, .writefn
= vmsa_ttbr_write
, .resetvalue
= 0,
2576 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr1_s
),
2577 offsetof(CPUARMState
, cp15
.ttbr1_ns
) } },
2578 { .name
= "TCR_EL1", .state
= ARM_CP_STATE_AA64
,
2579 .opc0
= 3, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
2580 .access
= PL1_RW
, .writefn
= vmsa_tcr_el1_write
,
2581 .resetfn
= vmsa_ttbcr_reset
, .raw_writefn
= raw_write
,
2582 .fieldoffset
= offsetof(CPUARMState
, cp15
.tcr_el
[1]) },
2583 { .name
= "TTBCR", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
2584 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
, .writefn
= vmsa_ttbcr_write
,
2585 .raw_writefn
= vmsa_ttbcr_raw_write
,
2586 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tcr_el
[3]),
2587 offsetoflow32(CPUARMState
, cp15
.tcr_el
[1])} },
2591 static void omap_ticonfig_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2594 env
->cp15
.c15_ticonfig
= value
& 0xe7;
2595 /* The OS_TYPE bit in this register changes the reported CPUID! */
2596 env
->cp15
.c0_cpuid
= (value
& (1 << 5)) ?
2597 ARM_CPUID_TI915T
: ARM_CPUID_TI925T
;
2600 static void omap_threadid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2603 env
->cp15
.c15_threadid
= value
& 0xffff;
2606 static void omap_wfi_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2609 /* Wait-for-interrupt (deprecated) */
2610 cpu_interrupt(CPU(arm_env_get_cpu(env
)), CPU_INTERRUPT_HALT
);
2613 static void omap_cachemaint_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2616 /* On OMAP there are registers indicating the max/min index of dcache lines
2617 * containing a dirty line; cache flush operations have to reset these.
2619 env
->cp15
.c15_i_max
= 0x000;
2620 env
->cp15
.c15_i_min
= 0xff0;
2623 static const ARMCPRegInfo omap_cp_reginfo
[] = {
2624 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= CP_ANY
,
2625 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_OVERRIDE
,
2626 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.esr_el
[1]),
2628 { .name
= "", .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 0, .opc2
= 0,
2629 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
2630 { .name
= "TICONFIG", .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0,
2632 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_ticonfig
), .resetvalue
= 0,
2633 .writefn
= omap_ticonfig_write
},
2634 { .name
= "IMAX", .cp
= 15, .crn
= 15, .crm
= 2, .opc1
= 0, .opc2
= 0,
2636 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_max
), .resetvalue
= 0, },
2637 { .name
= "IMIN", .cp
= 15, .crn
= 15, .crm
= 3, .opc1
= 0, .opc2
= 0,
2638 .access
= PL1_RW
, .resetvalue
= 0xff0,
2639 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_min
) },
2640 { .name
= "THREADID", .cp
= 15, .crn
= 15, .crm
= 4, .opc1
= 0, .opc2
= 0,
2642 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_threadid
), .resetvalue
= 0,
2643 .writefn
= omap_threadid_write
},
2644 { .name
= "TI925T_STATUS", .cp
= 15, .crn
= 15,
2645 .crm
= 8, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
2646 .type
= ARM_CP_NO_RAW
,
2647 .readfn
= arm_cp_read_zero
, .writefn
= omap_wfi_write
, },
2648 /* TODO: Peripheral port remap register:
2649 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2650 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2653 { .name
= "OMAP_CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
2654 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
2655 .type
= ARM_CP_OVERRIDE
| ARM_CP_NO_RAW
,
2656 .writefn
= omap_cachemaint_write
},
2657 { .name
= "C9", .cp
= 15, .crn
= 9,
2658 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
,
2659 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
, .resetvalue
= 0 },
2663 static void xscale_cpar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2666 env
->cp15
.c15_cpar
= value
& 0x3fff;
2669 static const ARMCPRegInfo xscale_cp_reginfo
[] = {
2670 { .name
= "XSCALE_CPAR",
2671 .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
2672 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_cpar
), .resetvalue
= 0,
2673 .writefn
= xscale_cpar_write
, },
2674 { .name
= "XSCALE_AUXCR",
2675 .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 1, .access
= PL1_RW
,
2676 .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_xscaleauxcr
),
2678 /* XScale specific cache-lockdown: since we have no cache we NOP these
2679 * and hope the guest does not really rely on cache behaviour.
2681 { .name
= "XSCALE_LOCK_ICACHE_LINE",
2682 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 1, .opc2
= 0,
2683 .access
= PL1_W
, .type
= ARM_CP_NOP
},
2684 { .name
= "XSCALE_UNLOCK_ICACHE",
2685 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 1, .opc2
= 1,
2686 .access
= PL1_W
, .type
= ARM_CP_NOP
},
2687 { .name
= "XSCALE_DCACHE_LOCK",
2688 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 2, .opc2
= 0,
2689 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
2690 { .name
= "XSCALE_UNLOCK_DCACHE",
2691 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 2, .opc2
= 1,
2692 .access
= PL1_W
, .type
= ARM_CP_NOP
},
2696 static const ARMCPRegInfo dummy_c15_cp_reginfo
[] = {
2697 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2698 * implementation of this implementation-defined space.
2699 * Ideally this should eventually disappear in favour of actually
2700 * implementing the correct behaviour for all cores.
2702 { .name
= "C15_IMPDEF", .cp
= 15, .crn
= 15,
2703 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
2705 .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
| ARM_CP_OVERRIDE
,
2710 static const ARMCPRegInfo cache_dirty_status_cp_reginfo
[] = {
2711 /* Cache status: RAZ because we have no cache so it's always clean */
2712 { .name
= "CDSR", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 6,
2713 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2718 static const ARMCPRegInfo cache_block_ops_cp_reginfo
[] = {
2719 /* We never have a a block transfer operation in progress */
2720 { .name
= "BXSR", .cp
= 15, .crn
= 7, .crm
= 12, .opc1
= 0, .opc2
= 4,
2721 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2723 /* The cache ops themselves: these all NOP for QEMU */
2724 { .name
= "IICR", .cp
= 15, .crm
= 5, .opc1
= 0,
2725 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2726 { .name
= "IDCR", .cp
= 15, .crm
= 6, .opc1
= 0,
2727 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2728 { .name
= "CDCR", .cp
= 15, .crm
= 12, .opc1
= 0,
2729 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2730 { .name
= "PIR", .cp
= 15, .crm
= 12, .opc1
= 1,
2731 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2732 { .name
= "PDR", .cp
= 15, .crm
= 12, .opc1
= 2,
2733 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2734 { .name
= "CIDCR", .cp
= 15, .crm
= 14, .opc1
= 0,
2735 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
2739 static const ARMCPRegInfo cache_test_clean_cp_reginfo
[] = {
2740 /* The cache test-and-clean instructions always return (1 << 30)
2741 * to indicate that there are no dirty cache lines.
2743 { .name
= "TC_DCACHE", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 3,
2744 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2745 .resetvalue
= (1 << 30) },
2746 { .name
= "TCI_DCACHE", .cp
= 15, .crn
= 7, .crm
= 14, .opc1
= 0, .opc2
= 3,
2747 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
2748 .resetvalue
= (1 << 30) },
2752 static const ARMCPRegInfo strongarm_cp_reginfo
[] = {
2753 /* Ignore ReadBuffer accesses */
2754 { .name
= "C9_READBUFFER", .cp
= 15, .crn
= 9,
2755 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
2756 .access
= PL1_RW
, .resetvalue
= 0,
2757 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
| ARM_CP_NO_RAW
},
2761 static uint64_t midr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2763 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2764 unsigned int cur_el
= arm_current_el(env
);
2765 bool secure
= arm_is_secure(env
);
2767 if (arm_feature(&cpu
->env
, ARM_FEATURE_EL2
) && !secure
&& cur_el
== 1) {
2768 return env
->cp15
.vpidr_el2
;
2770 return raw_read(env
, ri
);
2773 static uint64_t mpidr_read_val(CPUARMState
*env
)
2775 ARMCPU
*cpu
= ARM_CPU(arm_env_get_cpu(env
));
2776 uint64_t mpidr
= cpu
->mp_affinity
;
2778 if (arm_feature(env
, ARM_FEATURE_V7MP
)) {
2779 mpidr
|= (1U << 31);
2780 /* Cores which are uniprocessor (non-coherent)
2781 * but still implement the MP extensions set
2782 * bit 30. (For instance, Cortex-R5).
2784 if (cpu
->mp_is_up
) {
2785 mpidr
|= (1u << 30);
2791 static uint64_t mpidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2793 unsigned int cur_el
= arm_current_el(env
);
2794 bool secure
= arm_is_secure(env
);
2796 if (arm_feature(env
, ARM_FEATURE_EL2
) && !secure
&& cur_el
== 1) {
2797 return env
->cp15
.vmpidr_el2
;
2799 return mpidr_read_val(env
);
2802 static const ARMCPRegInfo mpidr_cp_reginfo
[] = {
2803 { .name
= "MPIDR", .state
= ARM_CP_STATE_BOTH
,
2804 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 5,
2805 .access
= PL1_R
, .readfn
= mpidr_read
, .type
= ARM_CP_NO_RAW
},
2809 static const ARMCPRegInfo lpae_cp_reginfo
[] = {
2811 { .name
= "AMAIR0", .state
= ARM_CP_STATE_BOTH
,
2812 .opc0
= 3, .crn
= 10, .crm
= 3, .opc1
= 0, .opc2
= 0,
2813 .access
= PL1_RW
, .type
= ARM_CP_CONST
,
2815 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2816 { .name
= "AMAIR1", .cp
= 15, .crn
= 10, .crm
= 3, .opc1
= 0, .opc2
= 1,
2817 .access
= PL1_RW
, .type
= ARM_CP_CONST
,
2819 { .name
= "PAR", .cp
= 15, .crm
= 7, .opc1
= 0,
2820 .access
= PL1_RW
, .type
= ARM_CP_64BIT
, .resetvalue
= 0,
2821 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.par_s
),
2822 offsetof(CPUARMState
, cp15
.par_ns
)} },
2823 { .name
= "TTBR0", .cp
= 15, .crm
= 2, .opc1
= 0,
2824 .access
= PL1_RW
, .type
= ARM_CP_64BIT
| ARM_CP_ALIAS
,
2825 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr0_s
),
2826 offsetof(CPUARMState
, cp15
.ttbr0_ns
) },
2827 .writefn
= vmsa_ttbr_write
, },
2828 { .name
= "TTBR1", .cp
= 15, .crm
= 2, .opc1
= 1,
2829 .access
= PL1_RW
, .type
= ARM_CP_64BIT
| ARM_CP_ALIAS
,
2830 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr1_s
),
2831 offsetof(CPUARMState
, cp15
.ttbr1_ns
) },
2832 .writefn
= vmsa_ttbr_write
, },
2836 static uint64_t aa64_fpcr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2838 return vfp_get_fpcr(env
);
2841 static void aa64_fpcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2844 vfp_set_fpcr(env
, value
);
2847 static uint64_t aa64_fpsr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2849 return vfp_get_fpsr(env
);
2852 static void aa64_fpsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2855 vfp_set_fpsr(env
, value
);
2858 static CPAccessResult
aa64_daif_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2861 if (arm_current_el(env
) == 0 && !(env
->cp15
.sctlr_el
[1] & SCTLR_UMA
)) {
2862 return CP_ACCESS_TRAP
;
2864 return CP_ACCESS_OK
;
2867 static void aa64_daif_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2870 env
->daif
= value
& PSTATE_DAIF
;
2873 static CPAccessResult
aa64_cacheop_access(CPUARMState
*env
,
2874 const ARMCPRegInfo
*ri
,
2877 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2878 * SCTLR_EL1.UCI is set.
2880 if (arm_current_el(env
) == 0 && !(env
->cp15
.sctlr_el
[1] & SCTLR_UCI
)) {
2881 return CP_ACCESS_TRAP
;
2883 return CP_ACCESS_OK
;
2886 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2887 * Page D4-1736 (DDI0487A.b)
2890 static void tlbi_aa64_vmalle1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2893 CPUState
*cs
= ENV_GET_CPU(env
);
2895 if (arm_is_secure_below_el3(env
)) {
2896 tlb_flush_by_mmuidx(cs
,
2897 (1 << ARMMMUIdx_S1SE1
) |
2898 (1 << ARMMMUIdx_S1SE0
));
2900 tlb_flush_by_mmuidx(cs
,
2901 (1 << ARMMMUIdx_S12NSE1
) |
2902 (1 << ARMMMUIdx_S12NSE0
));
2906 static void tlbi_aa64_vmalle1is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2909 CPUState
*cs
= ENV_GET_CPU(env
);
2910 bool sec
= arm_is_secure_below_el3(env
);
2913 tlb_flush_by_mmuidx_all_cpus_synced(cs
,
2914 (1 << ARMMMUIdx_S1SE1
) |
2915 (1 << ARMMMUIdx_S1SE0
));
2917 tlb_flush_by_mmuidx_all_cpus_synced(cs
,
2918 (1 << ARMMMUIdx_S12NSE1
) |
2919 (1 << ARMMMUIdx_S12NSE0
));
2923 static void tlbi_aa64_alle1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2926 /* Note that the 'ALL' scope must invalidate both stage 1 and
2927 * stage 2 translations, whereas most other scopes only invalidate
2928 * stage 1 translations.
2930 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2931 CPUState
*cs
= CPU(cpu
);
2933 if (arm_is_secure_below_el3(env
)) {
2934 tlb_flush_by_mmuidx(cs
,
2935 (1 << ARMMMUIdx_S1SE1
) |
2936 (1 << ARMMMUIdx_S1SE0
));
2938 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
2939 tlb_flush_by_mmuidx(cs
,
2940 (1 << ARMMMUIdx_S12NSE1
) |
2941 (1 << ARMMMUIdx_S12NSE0
) |
2942 (1 << ARMMMUIdx_S2NS
));
2944 tlb_flush_by_mmuidx(cs
,
2945 (1 << ARMMMUIdx_S12NSE1
) |
2946 (1 << ARMMMUIdx_S12NSE0
));
2951 static void tlbi_aa64_alle2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2954 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2955 CPUState
*cs
= CPU(cpu
);
2957 tlb_flush_by_mmuidx(cs
, (1 << ARMMMUIdx_S1E2
));
2960 static void tlbi_aa64_alle3_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2963 ARMCPU
*cpu
= arm_env_get_cpu(env
);
2964 CPUState
*cs
= CPU(cpu
);
2966 tlb_flush_by_mmuidx(cs
, (1 << ARMMMUIdx_S1E3
));
2969 static void tlbi_aa64_alle1is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2972 /* Note that the 'ALL' scope must invalidate both stage 1 and
2973 * stage 2 translations, whereas most other scopes only invalidate
2974 * stage 1 translations.
2976 CPUState
*cs
= ENV_GET_CPU(env
);
2977 bool sec
= arm_is_secure_below_el3(env
);
2978 bool has_el2
= arm_feature(env
, ARM_FEATURE_EL2
);