/* [qemu.git] target-arm/helper.c */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          int access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               int access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
#endif

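/* gdbstub register accessors (a descriptive note, derived from the code
 * below): each function reads or writes gdb register number 'reg' for its
 * coprocessor view via 'buf' and returns the number of bytes transferred,
 * or 0 if the register number is out of range for this view.
 */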
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

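/* AArch64 gdb view of the FP/SIMD state: V0-V31 are 128 bits each and are
 * stored as pairs of adjacent 64-bit vfp.regs[] elements, followed by the
 * 32-bit FPSR and FPCR.
 */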
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

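/* Raw accessors for the underlying register state: they use ri->fieldoffset
 * directly and perform none of the access checks or side effects of the
 * architectural accessors, which is what migration and gdb need.
 * raw_read() and raw_write() assert if the regdef has no backing field.
 */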
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

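/* Building the (index, value) list is a two-pass walk over the cp_regs
 * hash: count_cpreg() sizes the arrays and add_cpreg_to_list() fills in
 * the indexes, with both passes skipping NO_RAW and ALIAS regdefs so
 * that only migratable registers appear in the list.
 */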
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu), 1);
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu), value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs, 1);
    }
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs, value == 0);
    }
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
    }
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
    }
}

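/* The ARMCPRegInfo arrays below each describe a related group of system
 * registers and end with REGINFO_SENTINEL; which arrays actually get
 * registered for a given CPU is decided elsewhere by its feature bits.
 */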
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

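/* A worked example of the masking below: on a v7 core with VFP but not
 * Neon, the writable mask is ASEDIS | D32DIS | the cp10/cp11 fields
 * (0xc0f00000), and ASEDIS/D32DIS are forced to 1 as RAO/WI, so writing
 * 0xffffffff leaves CPACR_EL1 holding 0xc0f00000.
 */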
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

#ifndef CONFIG_USER_ONLY

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

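/* While the cycle counter is enabled, env->cp15.c15_ccnt holds not the
 * counter itself but (clock_ticks - counter), so reads can compute the
 * current count as (clock_ticks_now - c15_ccnt) without periodic updates.
 * pmccntr_sync() converts between that offset form and the raw counter
 * value, which is why writers such as pmcr_write() bracket changes to the
 * D and E bits with a pair of pmccntr_sync() calls.
 */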
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
#ifndef CONFIG_USER_ONLY
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                             offsetof(CPUARMState, cp15.vbar_ns) },
      .resetvalue = 0 },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

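/* ThumbEE registers. Only bit 0 of TEECR is writable; when it is set,
 * PL0 accesses to TEEHBR trap (see teehbr_access() above). In the
 * architecture this bit is documented as TEECR.XED.
 */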
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

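/* The system counter ticks at QEMU_CLOCK_VIRTUAL (nanoseconds) divided by
 * GTIMER_SCALE; assuming the GTIMER_SCALE of 16 from internals.h, that is
 * 62.5MHz, matching the CNTFRQ reset value below. gt_recalc_timer() turns
 * the next ISTATUS transition into a QEMUTimer deadline, clamped to
 * INT64_MAX / GTIMER_SCALE so the scaled deadline stays within the signed
 * 64-bit nanosecond range of a QEMUTimer.
 */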
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

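/* TVAL is the signed 32-bit down-counter view of a timer: reading it
 * returns CVAL - (counter - offset), and writing it sets CVAL to
 * (counter - offset) plus the sign-extended value, i.e. the timer fires
 * once the written number of ticks has elapsed.
 */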
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
}

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

1682 void arm_gt_ptimer_cb(void *opaque)
1683 {
1684 ARMCPU *cpu = opaque;
1685
1686 gt_recalc_timer(cpu, GTIMER_PHYS);
1687 }
1688
1689 void arm_gt_vtimer_cb(void *opaque)
1690 {
1691 ARMCPU *cpu = opaque;
1692
1693 gt_recalc_timer(cpu, GTIMER_VIRT);
1694 }
1695
1696 void arm_gt_htimer_cb(void *opaque)
1697 {
1698 ARMCPU *cpu = opaque;
1699
1700 gt_recalc_timer(cpu, GTIMER_HYP);
1701 }
1702
1703 void arm_gt_stimer_cb(void *opaque)
1704 {
1705 ARMCPU *cpu = opaque;
1706
1707 gt_recalc_timer(cpu, GTIMER_SEC);
1708 }
1709
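/* Added note: the four arm_gt_*_cb callbacks above are QEMU_CLOCK_VIRTUAL
 * timer handlers; the CPU init code (target-arm/cpu.c) hooks them up
 * roughly as sketched below (shown only for orientation):
 */
#if 0
    cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_ptimer_cb, cpu);
    cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_vtimer_cb, cpu);
#endif
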
1710 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1711 /* Note that CNTFRQ is purely reads-as-written for the benefit
1712 * of software; writing it doesn't actually change the timer frequency.
1713 * Our reset value matches the fixed frequency we implement the timer at.
1714 */
1715 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1716 .type = ARM_CP_ALIAS,
1717 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1718 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1719 },
1720 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1721 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1722 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1723 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1724 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1725 },
1726 /* overall control: mostly access permissions */
1727 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1728 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1729 .access = PL1_RW,
1730 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1731 .resetvalue = 0,
1732 },
1733 /* per-timer control */
1734 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1735 .secure = ARM_CP_SECSTATE_NS,
1736 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1737 .accessfn = gt_ptimer_access,
1738 .fieldoffset = offsetoflow32(CPUARMState,
1739 cp15.c14_timer[GTIMER_PHYS].ctl),
1740 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1741 },
1742 { .name = "CNTP_CTL(S)",
1743 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1744 .secure = ARM_CP_SECSTATE_S,
1745 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1746 .accessfn = gt_ptimer_access,
1747 .fieldoffset = offsetoflow32(CPUARMState,
1748 cp15.c14_timer[GTIMER_SEC].ctl),
1749 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1750 },
1751 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1752 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1753 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1754 .accessfn = gt_ptimer_access,
1755 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1756 .resetvalue = 0,
1757 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1758 },
1759 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1760 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1761 .accessfn = gt_vtimer_access,
1762 .fieldoffset = offsetoflow32(CPUARMState,
1763 cp15.c14_timer[GTIMER_VIRT].ctl),
1764 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1765 },
1766 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1767 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1768 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1769 .accessfn = gt_vtimer_access,
1770 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1771 .resetvalue = 0,
1772 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1773 },
1774 /* TimerValue views: a 32 bit downcounting view of the underlying state */
1775 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1776 .secure = ARM_CP_SECSTATE_NS,
1777 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1778 .accessfn = gt_ptimer_access,
1779 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1780 },
1781 { .name = "CNTP_TVAL(S)",
1782 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1783 .secure = ARM_CP_SECSTATE_S,
1784 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1785 .accessfn = gt_ptimer_access,
1786 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1787 },
1788 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1789 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1790 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1791 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1792 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1793 },
1794 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1795 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1796 .accessfn = gt_vtimer_access,
1797 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1798 },
1799 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1800 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1801 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1802 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
1803 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1804 },
1805 /* The counter itself */
1806 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1807 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1808 .accessfn = gt_pct_access,
1809 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1810 },
1811 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1812 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1813 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1814 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
1815 },
1816 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1817 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1818 .accessfn = gt_vct_access,
1819 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
1820 },
1821 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1822 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1823 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1824 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
1825 },
1826 /* Comparison value, indicating when the timer goes off */
1827 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1828 .secure = ARM_CP_SECSTATE_NS,
1829 .access = PL1_RW | PL0_R,
1830 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1831 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1832 .accessfn = gt_ptimer_access,
1833 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1834 },
1835 { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
1836 .secure = ARM_CP_SECSTATE_S,
1837 .access = PL1_RW | PL0_R,
1838 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1839 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1840 .accessfn = gt_ptimer_access,
1841 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1842 },
1843 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1844 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1845 .access = PL1_RW | PL0_R,
1846 .type = ARM_CP_IO,
1847 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1848 .resetvalue = 0, .accessfn = gt_ptimer_access,
1849 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1850 },
1851 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1852 .access = PL1_RW | PL0_R,
1853 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1854 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1855 .accessfn = gt_vtimer_access,
1856 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1857 },
1858 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1859 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1860 .access = PL1_RW | PL0_R,
1861 .type = ARM_CP_IO,
1862 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1863 .resetvalue = 0, .accessfn = gt_vtimer_access,
1864 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1865 },
1866 /* Secure timer -- this is actually restricted to only EL3
1867 * and configurably Secure-EL1 via the accessfn.
1868 */
1869 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
1870 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
1871 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
1872 .accessfn = gt_stimer_access,
1873 .readfn = gt_sec_tval_read,
1874 .writefn = gt_sec_tval_write,
1875 .resetfn = gt_sec_timer_reset,
1876 },
1877 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
1878 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
1879 .type = ARM_CP_IO, .access = PL1_RW,
1880 .accessfn = gt_stimer_access,
1881 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
1882 .resetvalue = 0,
1883 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1884 },
1885 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
1886 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
1887 .type = ARM_CP_IO, .access = PL1_RW,
1888 .accessfn = gt_stimer_access,
1889 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1890 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1891 },
1892 REGINFO_SENTINEL
1893 };
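
/* Worked example (added note): with GTIMER_SCALE defined as 16 (cpu.h),
 * the CNTFRQ_EL0 reset value above evaluates to
 *     (1000 * 1000 * 1000) / 16 = 62500000
 * so the guest sees a fixed 62.5 MHz system counter frequency.
 */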
1894
1895 #else
1896 /* In user-mode none of the generic timer registers are accessible,
1897 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
1898 * so instead just don't register any of them.
1899 */
1900 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1901 REGINFO_SENTINEL
1902 };
1903
1904 #endif
1905
1906 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1907 {
1908 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1909 raw_write(env, ri, value);
1910 } else if (arm_feature(env, ARM_FEATURE_V7)) {
1911 raw_write(env, ri, value & 0xfffff6ff);
1912 } else {
1913 raw_write(env, ri, value & 0xfffff1ff);
1914 }
1915 }
1916
1917 #ifndef CONFIG_USER_ONLY
1918 /* get_phys_addr() isn't present for user-mode-only targets */
1919
1920 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
1921 bool isread)
1922 {
1923 if (ri->opc2 & 4) {
1924 /* The ATS12NSO* operations must trap to EL3 if executed in
1925 * Secure EL1 (which can only happen if EL3 is AArch64).
1926 * They are simply UNDEF if executed from NS EL1.
1927 * They function normally from EL2 or EL3.
1928 */
1929 if (arm_current_el(env) == 1) {
1930 if (arm_is_secure_below_el3(env)) {
1931 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
1932 }
1933 return CP_ACCESS_TRAP_UNCATEGORIZED;
1934 }
1935 }
1936 return CP_ACCESS_OK;
1937 }
1938
1939 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
1940 int access_type, ARMMMUIdx mmu_idx)
1941 {
1942 hwaddr phys_addr;
1943 target_ulong page_size;
1944 int prot;
1945 uint32_t fsr;
1946 bool ret;
1947 uint64_t par64;
1948 MemTxAttrs attrs = {};
1949 ARMMMUFaultInfo fi = {};
1950
1951 ret = get_phys_addr(env, value, access_type, mmu_idx,
1952 &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
1953 if (extended_addresses_enabled(env)) {
1954 /* fsr is a DFSR/IFSR value for the long descriptor
1955 * translation table format, but with WnR always clear.
1956 * Convert it to a 64-bit PAR.
1957 */
1958 par64 = (1 << 11); /* LPAE bit always set */
1959 if (!ret) {
1960 par64 |= phys_addr & ~0xfffULL;
1961 if (!attrs.secure) {
1962 par64 |= (1 << 9); /* NS */
1963 }
1964 /* We don't set the ATTR or SH fields in the PAR. */
1965 } else {
1966 par64 |= 1; /* F */
1967 par64 |= (fsr & 0x3f) << 1; /* FS */
1968 /* Note that S2WLK and FSTAGE are always zero, because we don't
1969 * implement virtualization and therefore there can't be a stage 2
1970 * fault.
1971 */
1972 }
1973 } else {
1974 /* fsr is a DFSR/IFSR value for the short descriptor
1975 * translation table format (with WnR always clear).
1976 * Convert it to a 32-bit PAR.
1977 */
1978 if (!ret) {
1979 /* We do not set any attribute bits in the PAR */
1980 if (page_size == (1 << 24)
1981 && arm_feature(env, ARM_FEATURE_V7)) {
1982 par64 = (phys_addr & 0xff000000) | (1 << 1);
1983 } else {
1984 par64 = phys_addr & 0xfffff000;
1985 }
1986 if (!attrs.secure) {
1987 par64 |= (1 << 9); /* NS */
1988 }
1989 } else {
1990 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
1991 ((fsr & 0xf) << 1) | 1;
1992 }
1993 }
1994 return par64;
1995 }
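
/* Worked example (added note; addresses invented for illustration): a
 * successful long-descriptor lookup of VA 0x40001234 to Non-secure
 * PA 0x80001234 yields
 *     par64 = (0x80001234 & ~0xfffULL)   -- PA bits [55:12]
 *           | (1 << 11)                  -- LPAE format bit
 *           | (1 << 9)                   -- NS
 *           = 0x80001a00
 * while a translation fault with fsr == 0x07 instead produces
 *     par64 = (1 << 11) | ((0x07 & 0x3f) << 1) | 1 = 0x80f.
 */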
1996
1997 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1998 {
1999 int access_type = ri->opc2 & 1;
2000 uint64_t par64;
2001 ARMMMUIdx mmu_idx;
2002 int el = arm_current_el(env);
2003 bool secure = arm_is_secure_below_el3(env);
2004
2005 switch (ri->opc2 & 6) {
2006 case 0:
2007 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2008 switch (el) {
2009 case 3:
2010 mmu_idx = ARMMMUIdx_S1E3;
2011 break;
2012 case 2:
2013 mmu_idx = ARMMMUIdx_S1NSE1;
2014 break;
2015 case 1:
2016 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2017 break;
2018 default:
2019 g_assert_not_reached();
2020 }
2021 break;
2022 case 2:
2023 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2024 switch (el) {
2025 case 3:
2026 mmu_idx = ARMMMUIdx_S1SE0;
2027 break;
2028 case 2:
2029 mmu_idx = ARMMMUIdx_S1NSE0;
2030 break;
2031 case 1:
2032 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2033 break;
2034 default:
2035 g_assert_not_reached();
2036 }
2037 break;
2038 case 4:
2039 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2040 mmu_idx = ARMMMUIdx_S12NSE1;
2041 break;
2042 case 6:
2043 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2044 mmu_idx = ARMMMUIdx_S12NSE0;
2045 break;
2046 default:
2047 g_assert_not_reached();
2048 }
2049
2050 par64 = do_ats_write(env, value, access_type, mmu_idx);
2051
2052 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2053 }
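
/* Orientation note (added): in the AArch32 encoding handled here the AT
 * operations live at p15, c7, c8, so for example
 *     mcr p15, 0, r0, c7, c8, 0   -- ATS1CPR ((opc2 & 6) == 0, read)
 *     mcr p15, 0, r0, c7, c8, 3   -- ATS1CUW ((opc2 & 6) == 2, write)
 * with the result read back from PAR in the format built by
 * do_ats_write() above.
 */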
2054
2055 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2056 uint64_t value)
2057 {
2058 int access_type = ri->opc2 & 1;
2059 uint64_t par64;
2060
2061 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2062
2063 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2064 }
2065
2066 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2067 bool isread)
2068 {
2069 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2070 return CP_ACCESS_TRAP;
2071 }
2072 return CP_ACCESS_OK;
2073 }
2074
2075 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2076 uint64_t value)
2077 {
2078 int access_type = ri->opc2 & 1;
2079 ARMMMUIdx mmu_idx;
2080 int secure = arm_is_secure_below_el3(env);
2081
2082 switch (ri->opc2 & 6) {
2083 case 0:
2084 switch (ri->opc1) {
2085 case 0: /* AT S1E1R, AT S1E1W */
2086 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2087 break;
2088 case 4: /* AT S1E2R, AT S1E2W */
2089 mmu_idx = ARMMMUIdx_S1E2;
2090 break;
2091 case 6: /* AT S1E3R, AT S1E3W */
2092 mmu_idx = ARMMMUIdx_S1E3;
2093 break;
2094 default:
2095 g_assert_not_reached();
2096 }
2097 break;
2098 case 2: /* AT S1E0R, AT S1E0W */
2099 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2100 break;
2101 case 4: /* AT S12E1R, AT S12E1W */
2102 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2103 break;
2104 case 6: /* AT S12E0R, AT S12E0W */
2105 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2106 break;
2107 default:
2108 g_assert_not_reached();
2109 }
2110
2111 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2112 }
2113 #endif
2114
2115 static const ARMCPRegInfo vapa_cp_reginfo[] = {
2116 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2117 .access = PL1_RW, .resetvalue = 0,
2118 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2119 offsetoflow32(CPUARMState, cp15.par_ns) },
2120 .writefn = par_write },
2121 #ifndef CONFIG_USER_ONLY
2122 /* This underdecoding is safe because the reginfo is NO_RAW. */
2123 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2124 .access = PL1_W, .accessfn = ats_access,
2125 .writefn = ats_write, .type = ARM_CP_NO_RAW },
2126 #endif
2127 REGINFO_SENTINEL
2128 };
2129
2130 /* Compress extended MPU access permission bits to the basic format. */
2131 static uint32_t simple_mpu_ap_bits(uint32_t val)
2132 {
2133 uint32_t ret;
2134 uint32_t mask;
2135 int i;
2136 ret = 0;
2137 mask = 3;
2138 for (i = 0; i < 16; i += 2) {
2139 ret |= (val >> i) & mask;
2140 mask <<= 2;
2141 }
2142 return ret;
2143 }
2144
2145 /* Pad basic MPU access permission bits to extended format. */
2146 static uint32_t extended_mpu_ap_bits(uint32_t val)
2147 {
2148 uint32_t ret;
2149 uint32_t mask;
2150 int i;
2151 ret = 0;
2152 mask = 3;
2153 for (i = 0; i < 16; i += 2) {
2154 ret |= (val & mask) << i;
2155 mask <<= 2;
2156 }
2157 return ret;
2158 }
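
/* Worked example (added note): for extended value 0x31 -- region 0 AP=1
 * in bits [1:0], region 1 AP=3 in bits [5:4] --
 *     simple_mpu_ap_bits(0x31)  == 0xd   (fields packed to bits [3:0])
 *     extended_mpu_ap_bits(0xd) == 0x31  (round-trips back)
 * so the two register views stay consistent.
 */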
2159
2160 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2161 uint64_t value)
2162 {
2163 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2164 }
2165
2166 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2167 {
2168 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2169 }
2170
2171 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2172 uint64_t value)
2173 {
2174 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2175 }
2176
2177 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2178 {
2179 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2180 }
2181
2182 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2183 {
2184 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2185
2186 if (!u32p) {
2187 return 0;
2188 }
2189
2190 u32p += env->cp15.c6_rgnr;
2191 return *u32p;
2192 }
2193
2194 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2195 uint64_t value)
2196 {
2197 ARMCPU *cpu = arm_env_get_cpu(env);
2198 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2199
2200 if (!u32p) {
2201 return;
2202 }
2203
2204 u32p += env->cp15.c6_rgnr;
2205 tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
2206 *u32p = value;
2207 }
2208
2209 static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2210 {
2211 ARMCPU *cpu = arm_env_get_cpu(env);
2212 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2213
2214 if (!u32p) {
2215 return;
2216 }
2217
2218 memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2219 }
2220
2221 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2222 uint64_t value)
2223 {
2224 ARMCPU *cpu = arm_env_get_cpu(env);
2225 uint32_t nrgs = cpu->pmsav7_dregion;
2226
2227 if (value >= nrgs) {
2228 qemu_log_mask(LOG_GUEST_ERROR,
2229 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2230 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2231 return;
2232 }
2233
2234 raw_write(env, ri, value);
2235 }
2236
2237 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2238 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2239 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2240 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2241 .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2242 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2243 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2244 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2245 .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2246 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2247 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2248 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2249 .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2250 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2251 .access = PL1_RW,
2252 .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
2253 .writefn = pmsav7_rgnr_write },
2254 REGINFO_SENTINEL
2255 };
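
/* A minimal guest-side sketch (illustration only, not QEMU code): the
 * PMSAv7 data-region registers above are banked by RGNR, so a guest
 * programs region 3 with a sequence like
 *     mcr p15, 0, r0, c6, c2, 0   -- RGNR  = 3 (select region)
 *     mcr p15, 0, r1, c6, c1, 0   -- DRBAR = region base address
 *     mcr p15, 0, r2, c6, c1, 2   -- DRSR  = (RSIZE << 1) | enable
 * and each DRBAR/DRSR/DRACR access lands in the RGNR'th element of the
 * corresponding pmsav7.dr* array via pmsav7_read()/pmsav7_write().
 */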
2256
2257 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2258 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2259 .access = PL1_RW, .type = ARM_CP_ALIAS,
2260 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2261 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2262 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2263 .access = PL1_RW, .type = ARM_CP_ALIAS,
2264 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2265 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2266 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2267 .access = PL1_RW,
2268 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2269 .resetvalue = 0, },
2270 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2271 .access = PL1_RW,
2272 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2273 .resetvalue = 0, },
2274 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2275 .access = PL1_RW,
2276 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2277 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2278 .access = PL1_RW,
2279 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2280 /* Protection region base and size registers */
2281 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2282 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2283 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2284 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2285 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2286 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2287 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2288 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2289 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2290 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2291 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2292 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2293 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2294 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2295 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2296 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2297 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2298 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2299 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2300 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2301 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2302 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2303 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2304 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2305 REGINFO_SENTINEL
2306 };
2307
2308 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2309 uint64_t value)
2310 {
2311 TCR *tcr = raw_ptr(env, ri);
2312 int maskshift = extract32(value, 0, 3);
2313
2314 if (!arm_feature(env, ARM_FEATURE_V8)) {
2315 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2316 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2317 * using the Long-descriptor translation table format */
2318 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2319 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2320 /* In an implementation that includes the Security Extensions
2321 * TTBCR has additional fields PD0 [4] and PD1 [5] for
2322 * Short-descriptor translation table format.
2323 */
2324 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2325 } else {
2326 value &= TTBCR_N;
2327 }
2328 }
2329
2330 /* Update the masks corresponding to the TCR bank being written.
2331 * Note that we always calculate mask and base_mask, but
2332 * they are only used for short-descriptor tables (i.e. if EAE is 0);
2333 * for long-descriptor tables the TCR fields are used differently
2334 * and the mask and base_mask values are meaningless.
2335 */
2336 tcr->raw_tcr = value;
2337 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2338 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2339 }
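
/* Worked example (added note): for a short-descriptor TTBCR with N = 2,
 * maskshift == 2, so
 *     tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *     tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 * meaning VAs with either of the top two bits set translate via TTBR1,
 * and the (16KB >> N == 4KB) TTBR0 table base must be 4K-aligned.
 */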
2340
2341 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2342 uint64_t value)
2343 {
2344 ARMCPU *cpu = arm_env_get_cpu(env);
2345
2346 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2347 /* With LPAE a write to TTBCR can change the ASID
2348 * via the TTBCR.A1 bit, so do a TLB flush.
2349 */
2350 tlb_flush(CPU(cpu), 1);
2351 }
2352 vmsa_ttbcr_raw_write(env, ri, value);
2353 }
2354
2355 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2356 {
2357 TCR *tcr = raw_ptr(env, ri);
2358
2359 /* Reset both the TCR as well as the masks corresponding to the bank of
2360 * the TCR being reset.
2361 */
2362 tcr->raw_tcr = 0;
2363 tcr->mask = 0;
2364 tcr->base_mask = 0xffffc000u;
2365 }
2366
2367 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2368 uint64_t value)
2369 {
2370 ARMCPU *cpu = arm_env_get_cpu(env);
2371 TCR *tcr = raw_ptr(env, ri);
2372
2373 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2374 tlb_flush(CPU(cpu), 1);
2375 tcr->raw_tcr = value;
2376 }
2377
2378 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2379 uint64_t value)
2380 {
2381 /* 64 bit accesses to the TTBRs can change the ASID and so we
2382 * must flush the TLB.
2383 */
2384 if (cpreg_field_is_64bit(ri)) {
2385 ARMCPU *cpu = arm_env_get_cpu(env);
2386
2387 tlb_flush(CPU(cpu), 1);
2388 }
2389 raw_write(env, ri, value);
2390 }
2391
2392 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2393 uint64_t value)
2394 {
2395 ARMCPU *cpu = arm_env_get_cpu(env);
2396 CPUState *cs = CPU(cpu);
2397
2398 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
2399 if (raw_read(env, ri) != value) {
2400 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2401 ARMMMUIdx_S2NS, -1);
2402 raw_write(env, ri, value);
2403 }
2404 }
2405
2406 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2407 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2408 .access = PL1_RW, .type = ARM_CP_ALIAS,
2409 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2410 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2411 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2412 .access = PL1_RW, .resetvalue = 0,
2413 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2414 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2415 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2416 .access = PL1_RW, .resetvalue = 0,
2417 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2418 offsetof(CPUARMState, cp15.dfar_ns) } },
2419 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2420 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2421 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2422 .resetvalue = 0, },
2423 REGINFO_SENTINEL
2424 };
2425
2426 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2427 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2428 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2429 .access = PL1_RW,
2430 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2431 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2432 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2433 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2434 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2435 offsetof(CPUARMState, cp15.ttbr0_ns) } },
2436 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2437 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2438 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2439 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2440 offsetof(CPUARMState, cp15.ttbr1_ns) } },
2441 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2442 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2443 .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2444 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2445 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2446 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2447 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2448 .raw_writefn = vmsa_ttbcr_raw_write,
2449 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2450 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2451 REGINFO_SENTINEL
2452 };
2453
2454 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2455 uint64_t value)
2456 {
2457 env->cp15.c15_ticonfig = value & 0xe7;
2458 /* The OS_TYPE bit in this register changes the reported CPUID! */
2459 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2460 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2461 }
2462
2463 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2464 uint64_t value)
2465 {
2466 env->cp15.c15_threadid = value & 0xffff;
2467 }
2468
2469 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2470 uint64_t value)
2471 {
2472 /* Wait-for-interrupt (deprecated) */
2473 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2474 }
2475
2476 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2477 uint64_t value)
2478 {
2479 /* On OMAP there are registers indicating the max/min index of dcache lines
2480 * containing a dirty line; cache flush operations have to reset these.
2481 */
2482 env->cp15.c15_i_max = 0x000;
2483 env->cp15.c15_i_min = 0xff0;
2484 }
2485
2486 static const ARMCPRegInfo omap_cp_reginfo[] = {
2487 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2488 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2489 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2490 .resetvalue = 0, },
2491 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2492 .access = PL1_RW, .type = ARM_CP_NOP },
2493 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2494 .access = PL1_RW,
2495 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2496 .writefn = omap_ticonfig_write },
2497 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2498 .access = PL1_RW,
2499 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2500 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2501 .access = PL1_RW, .resetvalue = 0xff0,
2502 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2503 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2504 .access = PL1_RW,
2505 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2506 .writefn = omap_threadid_write },
2507 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2508 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2509 .type = ARM_CP_NO_RAW,
2510 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2511 /* TODO: Peripheral port remap register:
2512 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2513 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2514 * when MMU is off.
2515 */
2516 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2517 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2518 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2519 .writefn = omap_cachemaint_write },
2520 { .name = "C9", .cp = 15, .crn = 9,
2521 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2522 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2523 REGINFO_SENTINEL
2524 };
2525
2526 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2527 uint64_t value)
2528 {
2529 env->cp15.c15_cpar = value & 0x3fff;
2530 }
2531
2532 static const ARMCPRegInfo xscale_cp_reginfo[] = {
2533 { .name = "XSCALE_CPAR",
2534 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2535 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2536 .writefn = xscale_cpar_write, },
2537 { .name = "XSCALE_AUXCR",
2538 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2539 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2540 .resetvalue = 0, },
2541 /* XScale-specific cache lockdown: since we model no cache, we NOP these
2542 * and hope the guest does not really rely on cache behaviour.
2543 */
2544 { .name = "XSCALE_LOCK_ICACHE_LINE",
2545 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2546 .access = PL1_W, .type = ARM_CP_NOP },
2547 { .name = "XSCALE_UNLOCK_ICACHE",
2548 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2549 .access = PL1_W, .type = ARM_CP_NOP },
2550 { .name = "XSCALE_DCACHE_LOCK",
2551 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2552 .access = PL1_RW, .type = ARM_CP_NOP },
2553 { .name = "XSCALE_UNLOCK_DCACHE",
2554 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2555 .access = PL1_W, .type = ARM_CP_NOP },
2556 REGINFO_SENTINEL
2557 };
2558
2559 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2560 /* RAZ/WI the whole crn=15 space when we don't have a more specific
2561 * implementation of this implementation-defined space.
2562 * Ideally this should eventually disappear in favour of actually
2563 * implementing the correct behaviour for all cores.
2564 */
2565 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2566 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2567 .access = PL1_RW,
2568 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2569 .resetvalue = 0 },
2570 REGINFO_SENTINEL
2571 };
2572
2573 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2574 /* Cache status: RAZ because we have no cache so it's always clean */
2575 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2576 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2577 .resetvalue = 0 },
2578 REGINFO_SENTINEL
2579 };
2580
2581 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2582 /* We never have a block transfer operation in progress */
2583 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2584 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2585 .resetvalue = 0 },
2586 /* The cache ops themselves: these all NOP for QEMU */
2587 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2588 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2589 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2590 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2591 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2592 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2593 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2594 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2595 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2596 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2597 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2598 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2599 REGINFO_SENTINEL
2600 };
2601
2602 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2603 /* The cache test-and-clean instructions always return (1 << 30)
2604 * to indicate that there are no dirty cache lines.
2605 */
2606 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2607 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2608 .resetvalue = (1 << 30) },
2609 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2610 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2611 .resetvalue = (1 << 30) },
2612 REGINFO_SENTINEL
2613 };
2614
2615 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2616 /* Ignore ReadBuffer accesses */
2617 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2618 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2619 .access = PL1_RW, .resetvalue = 0,
2620 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2621 REGINFO_SENTINEL
2622 };
2623
2624 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2625 {
2626 ARMCPU *cpu = arm_env_get_cpu(env);
2627 unsigned int cur_el = arm_current_el(env);
2628 bool secure = arm_is_secure(env);
2629
2630 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2631 return env->cp15.vpidr_el2;
2632 }
2633 return raw_read(env, ri);
2634 }
2635
2636 static uint64_t mpidr_read_val(CPUARMState *env)
2637 {
2638 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2639 uint64_t mpidr = cpu->mp_affinity;
2640
2641 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2642 mpidr |= (1U << 31);
2643 /* Cores which are uniprocessor (non-coherent)
2644 * but still implement the MP extensions set
2645 * bit 30. (For instance, Cortex-R5).
2646 */
2647 if (cpu->mp_is_up) {
2648 mpidr |= (1u << 30);
2649 }
2650 }
2651 return mpidr;
2652 }
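
/* Worked example (added note): for a uniprocessor Cortex-R5 with
 * mp_affinity == 0 and mp_is_up set, the value built above is
 *     (1U << 31) | (1u << 30) = 0xc0000000
 * i.e. the M bit (MP extensions implemented) plus the U bit
 * (uniprocessor), with all affinity fields zero.
 */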
2653
2654 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2655 {
2656 unsigned int cur_el = arm_current_el(env);
2657 bool secure = arm_is_secure(env);
2658
2659 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2660 return env->cp15.vmpidr_el2;
2661 }
2662 return mpidr_read_val(env);
2663 }
2664
2665 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2666 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2667 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2668 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2669 REGINFO_SENTINEL
2670 };
2671
2672 static const ARMCPRegInfo lpae_cp_reginfo[] = {
2673 /* NOP AMAIR0/1 */
2674 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2675 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2676 .access = PL1_RW, .type = ARM_CP_CONST,
2677 .resetvalue = 0 },
2678 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2679 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2680 .access = PL1_RW, .type = ARM_CP_CONST,
2681 .resetvalue = 0 },
2682 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2683 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2684 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2685 offsetof(CPUARMState, cp15.par_ns)} },
2686 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2687 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2688 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2689 offsetof(CPUARMState, cp15.ttbr0_ns) },
2690 .writefn = vmsa_ttbr_write, },
2691 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2692 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2693 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2694 offsetof(CPUARMState, cp15.ttbr1_ns) },
2695 .writefn = vmsa_ttbr_write, },
2696 REGINFO_SENTINEL
2697 };
2698
2699 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2700 {
2701 return vfp_get_fpcr(env);
2702 }
2703
2704 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2705 uint64_t value)
2706 {
2707 vfp_set_fpcr(env, value);
2708 }
2709
2710 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2711 {
2712 return vfp_get_fpsr(env);
2713 }
2714
2715 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2716 uint64_t value)
2717 {
2718 vfp_set_fpsr(env, value);
2719 }
2720
2721 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2722 bool isread)
2723 {
2724 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2725 return CP_ACCESS_TRAP;
2726 }
2727 return CP_ACCESS_OK;
2728 }
2729
2730 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2731 uint64_t value)
2732 {
2733 env->daif = value & PSTATE_DAIF;
2734 }
2735
2736 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2737 const ARMCPRegInfo *ri,
2738 bool isread)
2739 {
2740 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2741 * SCTLR_EL1.UCI is set.
2742 */
2743 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2744 return CP_ACCESS_TRAP;
2745 }
2746 return CP_ACCESS_OK;
2747 }
2748
2749 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2750 * Page D4-1736 (DDI0487A.b)
2751 */
2752
2753 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2754 uint64_t value)
2755 {
2756 ARMCPU *cpu = arm_env_get_cpu(env);
2757 CPUState *cs = CPU(cpu);
2758
2759 if (arm_is_secure_below_el3(env)) {
2760 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2761 } else {
2762 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2763 }
2764 }
2765
2766 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2767 uint64_t value)
2768 {
2769 bool sec = arm_is_secure_below_el3(env);
2770 CPUState *other_cs;
2771
2772 CPU_FOREACH(other_cs) {
2773 if (sec) {
2774 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2775 } else {
2776 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2777 ARMMMUIdx_S12NSE0, -1);
2778 }
2779 }
2780 }
2781
2782 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2783 uint64_t value)
2784 {
2785 /* Note that the 'ALL' scope must invalidate both stage 1 and
2786 * stage 2 translations, whereas most other scopes only invalidate
2787 * stage 1 translations.
2788 */
2789 ARMCPU *cpu = arm_env_get_cpu(env);
2790 CPUState *cs = CPU(cpu);
2791
2792 if (arm_is_secure_below_el3(env)) {
2793 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2794 } else {
2795 if (arm_feature(env, ARM_FEATURE_EL2)) {
2796 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2797 ARMMMUIdx_S2NS, -1);
2798 } else {
2799 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2800 }
2801 }
2802 }
2803
2804 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2805 uint64_t value)
2806 {
2807 ARMCPU *cpu = arm_env_get_cpu(env);
2808 CPUState *cs = CPU(cpu);
2809
2810 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
2811 }
2812
2813 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2814 uint64_t value)
2815 {
2816 ARMCPU *cpu = arm_env_get_cpu(env);
2817 CPUState *cs = CPU(cpu);
2818
2819 tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
2820 }
2821
2822 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2823 uint64_t value)
2824 {
2825 /* Note that the 'ALL' scope must invalidate both stage 1 and
2826 * stage 2 translations, whereas most other scopes only invalidate
2827 * stage 1 translations.
2828 */
2829 bool sec = arm_is_secure_below_el3(env);
2830 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2831 CPUState *other_cs;
2832
2833 CPU_FOREACH(other_cs) {
2834 if (sec) {
2835 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2836 } else if (has_el2) {
2837 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2838 ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
2839 } else {
2840 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2841 ARMMMUIdx_S12NSE0, -1);
2842 }
2843 }
2844 }
2845
2846 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2847 uint64_t value)
2848 {
2849 CPUState *other_cs;
2850
2851 CPU_FOREACH(other_cs) {
2852 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
2853 }
2854 }
2855
2856 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2857 uint64_t value)
2858 {
2859 CPUState *other_cs;
2860
2861 CPU_FOREACH(other_cs) {
2862 tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
2863 }
2864 }
2865
2866 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2867 uint64_t value)
2868 {
2869 /* Invalidate by VA, EL1&0 (AArch64 version).
2870 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
2871 * since we don't support flush-for-specific-ASID-only or
2872 * flush-last-level-only.
2873 */
2874 ARMCPU *cpu = arm_env_get_cpu(env);
2875 CPUState *cs = CPU(cpu);
2876 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2877
2878 if (arm_is_secure_below_el3(env)) {
2879 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
2880 ARMMMUIdx_S1SE0, -1);
2881 } else {
2882 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
2883 ARMMMUIdx_S12NSE0, -1);
2884 }
2885 }
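
/* Worked example (added note; value invented for illustration): TLBI
 * VAE1 passes VA[55:12] in Xt[43:0], hence the shift-and-sign-extend:
 *     Xt = 0x0000080000000000 (bit 43 set)
 *     Xt << 12               = 0x0080000000000000
 *     sextract64(..., 0, 56) = 0xff80000000000000
 * i.e. bit 55 of the page address is propagated into bits [63:56].
 */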
2886
2887 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2888 uint64_t value)
2889 {
2890 /* Invalidate by VA, EL2
2891 * Currently handles both VAE2 and VALE2, since we don't support
2892 * flush-last-level-only.
2893 */
2894 ARMCPU *cpu = arm_env_get_cpu(env);
2895 CPUState *cs = CPU(cpu);
2896 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2897
2898 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
2899 }
2900
2901 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2902 uint64_t value)
2903 {
2904 /* Invalidate by VA, EL3
2905 * Currently handles both VAE3 and VALE3, since we don't support
2906 * flush-last-level-only.
2907 */
2908 ARMCPU *cpu = arm_env_get_cpu(env);
2909 CPUState *cs = CPU(cpu);
2910 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2911
2912 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
2913 }
2914
2915 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2916 uint64_t value)
2917 {
2918 bool sec = arm_is_secure_below_el3(env);
2919 CPUState *other_cs;
2920 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2921
2922 CPU_FOREACH(other_cs) {
2923 if (sec) {
2924 tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
2925 ARMMMUIdx_S1SE0, -1);
2926 } else {
2927 tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
2928 ARMMMUIdx_S12NSE0, -1);
2929 }
2930 }
2931 }
2932
2933 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2934 uint64_t value)
2935 {
2936 CPUState *other_cs;
2937 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2938
2939 CPU_FOREACH(other_cs) {
2940 tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
2941 }
2942 }
2943
2944 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2945 uint64_t value)
2946 {
2947 CPUState *other_cs;
2948 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2949
2950 CPU_FOREACH(other_cs) {
2951 tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
2952 }
2953 }
2954
2955 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2956 uint64_t value)
2957 {
2958 /* Invalidate by IPA. This has to invalidate any structures that
2959 * contain only stage 2 translation information, but does not need
2960 * to apply to structures that contain combined stage 1 and stage 2
2961 * translation information.
2962 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
2963 */
2964 ARMCPU *cpu = arm_env_get_cpu(env);
2965 CPUState *cs = CPU(cpu);
2966 uint64_t pageaddr;
2967
2968 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
2969 return;
2970 }
2971
2972 pageaddr = sextract64(value << 12, 0, 48);
2973
2974 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
2975 }
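
/* Added note: unlike the VA flushes above, IPAS2E1 carries IPA[47:12] in
 * Xt[35:0], so only 48 bits are extracted here; for example Xt == 0x1
 * gives pageaddr == 0x1000 (the second 4K page of the IPA space).
 */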
2976
2977 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2978 uint64_t value)
2979 {
2980 CPUState *other_cs;
2981 uint64_t pageaddr;
2982
2983 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
2984 return;
2985 }
2986