target-arm: Fix broken SCTLR_EL3 reset
[qemu.git] / target-arm / helper.c
1 #include "cpu.h"
2 #include "internals.h"
3 #include "exec/gdbstub.h"
4 #include "exec/helper-proto.h"
5 #include "qemu/host-utils.h"
6 #include "sysemu/arch_init.h"
7 #include "sysemu/sysemu.h"
8 #include "qemu/bitops.h"
9 #include "qemu/crc32c.h"
10 #include "exec/cpu_ldst.h"
11 #include "arm_ldst.h"
12 #include <zlib.h> /* For crc32 */
13 #include "exec/semihost.h"
14
15 #ifndef CONFIG_USER_ONLY
16 static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
17 int access_type, ARMMMUIdx mmu_idx,
18 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
19 target_ulong *page_size, uint32_t *fsr);
20
21 /* Definitions for the PMCCNTR and PMCR registers */
22 #define PMCRD 0x8
23 #define PMCRC 0x4
24 #define PMCRE 0x1
25 #endif
26
27 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
28 {
29 int nregs;
30
31 /* VFP data registers are always little-endian. */
32 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
33 if (reg < nregs) {
34 stfq_le_p(buf, env->vfp.regs[reg]);
35 return 8;
36 }
37 if (arm_feature(env, ARM_FEATURE_NEON)) {
38 /* Aliases for Q regs. */
39 nregs += 16;
40 if (reg < nregs) {
41 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
42 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
43 return 16;
44 }
45 }
46 switch (reg - nregs) {
47 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
48 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
49 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
50 }
51 return 0;
52 }
53
54 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
55 {
56 int nregs;
57
58 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
59 if (reg < nregs) {
60 env->vfp.regs[reg] = ldfq_le_p(buf);
61 return 8;
62 }
63 if (arm_feature(env, ARM_FEATURE_NEON)) {
64 nregs += 16;
65 if (reg < nregs) {
66 env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
67 env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
68 return 16;
69 }
70 }
71 switch (reg - nregs) {
72 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
73 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
74 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
75 }
76 return 0;
77 }
78
79 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
80 {
81 switch (reg) {
82 case 0 ... 31:
83 /* 128 bit FP register */
84 stfq_le_p(buf, env->vfp.regs[reg * 2]);
85 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
86 return 16;
87 case 32:
88 /* FPSR */
89 stl_p(buf, vfp_get_fpsr(env));
90 return 4;
91 case 33:
92 /* FPCR */
93 stl_p(buf, vfp_get_fpcr(env));
94 return 4;
95 default:
96 return 0;
97 }
98 }
99
100 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
101 {
102 switch (reg) {
103 case 0 ... 31:
104 /* 128 bit FP register */
105 env->vfp.regs[reg * 2] = ldfq_le_p(buf);
106 env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
107 return 16;
108 case 32:
109 /* FPSR */
110 vfp_set_fpsr(env, ldl_p(buf));
111 return 4;
112 case 33:
113 /* FPCR */
114 vfp_set_fpcr(env, ldl_p(buf));
115 return 4;
116 default:
117 return 0;
118 }
119 }
120
121 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
122 {
123 assert(ri->fieldoffset);
124 if (cpreg_field_is_64bit(ri)) {
125 return CPREG_FIELD64(env, ri);
126 } else {
127 return CPREG_FIELD32(env, ri);
128 }
129 }
130
131 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
132 uint64_t value)
133 {
134 assert(ri->fieldoffset);
135 if (cpreg_field_is_64bit(ri)) {
136 CPREG_FIELD64(env, ri) = value;
137 } else {
138 CPREG_FIELD32(env, ri) = value;
139 }
140 }
141
142 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
143 {
144 return (char *)env + ri->fieldoffset;
145 }
146
147 static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
148 {
149 /* Raw read of a coprocessor register (as needed for migration, etc). */
150 if (ri->type & ARM_CP_CONST) {
151 return ri->resetvalue;
152 } else if (ri->raw_readfn) {
153 return ri->raw_readfn(env, ri);
154 } else if (ri->readfn) {
155 return ri->readfn(env, ri);
156 } else {
157 return raw_read(env, ri);
158 }
159 }
160
161 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
162 uint64_t v)
163 {
164 /* Raw write of a coprocessor register (as needed for migration, etc).
165 * Note that constant registers are treated as write-ignored; the
166 * caller should check for success by whether a readback gives the
167 * value written.
168 */
169 if (ri->type & ARM_CP_CONST) {
170 return;
171 } else if (ri->raw_writefn) {
172 ri->raw_writefn(env, ri, v);
173 } else if (ri->writefn) {
174 ri->writefn(env, ri, v);
175 } else {
176 raw_write(env, ri, v);
177 }
178 }
179
180 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
181 {
182 /* Return true if the regdef would cause an assertion if you called
183 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
184 * program bug for it not to have the NO_RAW flag).
185 * NB that returning false here doesn't necessarily mean that calling
186 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
187 * read/write access functions which are safe for raw use" from "has
188 * read/write access functions which have side effects but has forgotten
189 * to provide raw access functions".
190 * The tests here line up with the conditions in read/write_raw_cp_reg()
191 * and assertions in raw_read()/raw_write().
192 */
193 if ((ri->type & ARM_CP_CONST) ||
194 ri->fieldoffset ||
195 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
196 return false;
197 }
198 return true;
199 }
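/* A minimal illustrative sketch (not from the original file): a regdef with
 * only a readfn, no fieldoffset and no raw accessors cannot be read "raw"
 * for migration, so it must be marked ARM_CP_NO_RAW or the check above would
 * flag it. The register name below is hypothetical; the shape mirrors the
 * real ISR_EL1 entry defined later in this file.
 *
 *   { .name = "EXAMPLE_STATUS", .cp = 15, .crn = 12, .crm = 1,
 *     .opc1 = 0, .opc2 = 0, .access = PL1_R,
 *     .type = ARM_CP_NO_RAW, .readfn = isr_read },
 */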
200
201 bool write_cpustate_to_list(ARMCPU *cpu)
202 {
203 /* Write the coprocessor state from cpu->env to the (index,value) list. */
204 int i;
205 bool ok = true;
206
207 for (i = 0; i < cpu->cpreg_array_len; i++) {
208 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
209 const ARMCPRegInfo *ri;
210
211 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
212 if (!ri) {
213 ok = false;
214 continue;
215 }
216 if (ri->type & ARM_CP_NO_RAW) {
217 continue;
218 }
219 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
220 }
221 return ok;
222 }
223
224 bool write_list_to_cpustate(ARMCPU *cpu)
225 {
226 int i;
227 bool ok = true;
228
229 for (i = 0; i < cpu->cpreg_array_len; i++) {
230 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
231 uint64_t v = cpu->cpreg_values[i];
232 const ARMCPRegInfo *ri;
233
234 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
235 if (!ri) {
236 ok = false;
237 continue;
238 }
239 if (ri->type & ARM_CP_NO_RAW) {
240 continue;
241 }
242 /* Write value and confirm it reads back as written
243 * (to catch read-only registers and partially read-only
244 * registers where the incoming migration value doesn't match)
245 */
246 write_raw_cp_reg(&cpu->env, ri, v);
247 if (read_raw_cp_reg(&cpu->env, ri) != v) {
248 ok = false;
249 }
250 }
251 return ok;
252 }
253
254 static void add_cpreg_to_list(gpointer key, gpointer opaque)
255 {
256 ARMCPU *cpu = opaque;
257 uint64_t regidx;
258 const ARMCPRegInfo *ri;
259
260 regidx = *(uint32_t *)key;
261 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
262
263 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
264 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
265 /* The value array need not be initialized at this point */
266 cpu->cpreg_array_len++;
267 }
268 }
269
270 static void count_cpreg(gpointer key, gpointer opaque)
271 {
272 ARMCPU *cpu = opaque;
273 uint64_t regidx;
274 const ARMCPRegInfo *ri;
275
276 regidx = *(uint32_t *)key;
277 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
278
279 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
280 cpu->cpreg_array_len++;
281 }
282 }
283
284 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
285 {
286 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
287 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
288
289 if (aidx > bidx) {
290 return 1;
291 }
292 if (aidx < bidx) {
293 return -1;
294 }
295 return 0;
296 }
297
298 void init_cpreg_list(ARMCPU *cpu)
299 {
300 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
301 * Note that we require cpreg_tuples[] to be sorted by key ID.
302 */
303 GList *keys;
304 int arraylen;
305
306 keys = g_hash_table_get_keys(cpu->cp_regs);
307 keys = g_list_sort(keys, cpreg_key_compare);
308
309 cpu->cpreg_array_len = 0;
310
311 g_list_foreach(keys, count_cpreg, cpu);
312
313 arraylen = cpu->cpreg_array_len;
314 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
315 cpu->cpreg_values = g_new(uint64_t, arraylen);
316 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
317 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
318 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
319 cpu->cpreg_array_len = 0;
320
321 g_list_foreach(keys, add_cpreg_to_list, cpu);
322
323 assert(cpu->cpreg_array_len == arraylen);
324
325 g_list_free(keys);
326 }
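/* A minimal illustrative sketch (not from the original file): how the three
 * functions above typically cooperate around migration. The wrapper function
 * is hypothetical and error handling is elided.
 *
 *   static void example_cpreg_sync(ARMCPU *cpu)
 *   {
 *       init_cpreg_list(cpu);                 // once, after registration
 *
 *       if (!write_cpustate_to_list(cpu)) {   // before saving state
 *           // some register had no reginfo; caller decides what to do
 *       }
 *
 *       if (!write_list_to_cpustate(cpu)) {   // after loading state
 *           // an incoming value did not read back as written
 *       }
 *   }
 */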
327
328 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
329 {
330 ARMCPU *cpu = arm_env_get_cpu(env);
331
332 raw_write(env, ri, value);
333 tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
334 }
335
336 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
337 {
338 ARMCPU *cpu = arm_env_get_cpu(env);
339
340 if (raw_read(env, ri) != value) {
341 /* Unlike real hardware the qemu TLB uses virtual addresses,
342 * not modified virtual addresses, so this causes a TLB flush.
343 */
344 tlb_flush(CPU(cpu), 1);
345 raw_write(env, ri, value);
346 }
347 }
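/* A worked example (not from the original file) of why this flush is needed:
 * with FCSE active, a virtual address below 32MB is combined with
 * FCSEIDR.PID (bits [31:25]) to form the modified virtual address, e.g.
 *
 *   PID = 0x02000000, VA = 0x00001000  ->  MVA = 0x02001000
 *
 * Since QEMU's TLB is keyed on the unmodified VA, entries cached under the
 * old PID would otherwise survive a PID switch.
 */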
348
349 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
350 uint64_t value)
351 {
352 ARMCPU *cpu = arm_env_get_cpu(env);
353
354 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
355 && !extended_addresses_enabled(env)) {
356 /* For VMSA (when not using the LPAE long descriptor page table
357 * format) this register includes the ASID, so do a TLB flush.
358 * For PMSA it is purely a process ID and no action is needed.
359 */
360 tlb_flush(CPU(cpu), 1);
361 }
362 raw_write(env, ri, value);
363 }
364
365 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
366 uint64_t value)
367 {
368 /* Invalidate all (TLBIALL) */
369 ARMCPU *cpu = arm_env_get_cpu(env);
370
371 tlb_flush(CPU(cpu), 1);
372 }
373
374 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
375 uint64_t value)
376 {
377 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
378 ARMCPU *cpu = arm_env_get_cpu(env);
379
380 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
381 }
382
383 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
384 uint64_t value)
385 {
386 /* Invalidate by ASID (TLBIASID) */
387 ARMCPU *cpu = arm_env_get_cpu(env);
388
389 tlb_flush(CPU(cpu), value == 0);
390 }
391
392 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
393 uint64_t value)
394 {
395 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
396 ARMCPU *cpu = arm_env_get_cpu(env);
397
398 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
399 }
400
401 /* IS variants of TLB operations must affect all cores */
402 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
403 uint64_t value)
404 {
405 CPUState *other_cs;
406
407 CPU_FOREACH(other_cs) {
408 tlb_flush(other_cs, 1);
409 }
410 }
411
412 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
413 uint64_t value)
414 {
415 CPUState *other_cs;
416
417 CPU_FOREACH(other_cs) {
418 tlb_flush(other_cs, value == 0);
419 }
420 }
421
422 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
423 uint64_t value)
424 {
425 CPUState *other_cs;
426
427 CPU_FOREACH(other_cs) {
428 tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
429 }
430 }
431
432 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
433 uint64_t value)
434 {
435 CPUState *other_cs;
436
437 CPU_FOREACH(other_cs) {
438 tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
439 }
440 }
441
442 static const ARMCPRegInfo cp_reginfo[] = {
443 /* Define the secure and non-secure FCSE identifier CP registers
444 * separately because there is no secure bank in V8 (no _EL3). This allows
445 * the secure register to be properly reset and migrated. There is also no
446 * v8 EL1 version of the register so the non-secure instance stands alone.
447 */
448 { .name = "FCSEIDR(NS)",
449 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
450 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
451 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
452 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
453 { .name = "FCSEIDR(S)",
454 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
455 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
456 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
457 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
458 /* Define the secure and non-secure context identifier CP registers
459 * separately because there is no secure bank in V8 (no _EL3). This allows
460 * the secure register to be properly reset and migrated. In the
461 * non-secure case, the 32-bit register will have reset and migration
462 * disabled during registration as it is handled by the 64-bit instance.
463 */
464 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
465 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
466 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
467 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
468 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
469 { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
470 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
471 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
472 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
473 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
474 REGINFO_SENTINEL
475 };
476
477 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
478 /* NB: Some of these registers exist in v8 but with more precise
479 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
480 */
481 /* MMU Domain access control / MPU write buffer control */
482 { .name = "DACR",
483 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
484 .access = PL1_RW, .resetvalue = 0,
485 .writefn = dacr_write, .raw_writefn = raw_write,
486 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
487 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
488 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
489 * For v6 and v5, these mappings are overly broad.
490 */
491 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
492 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
493 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
494 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
495 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
496 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
497 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
498 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
499 /* Cache maintenance ops; some of this space may be overridden later. */
500 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
501 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
502 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
503 REGINFO_SENTINEL
504 };
505
506 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
507 /* Not all pre-v6 cores implemented this WFI, so this is slightly
508 * over-broad.
509 */
510 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
511 .access = PL1_W, .type = ARM_CP_WFI },
512 REGINFO_SENTINEL
513 };
514
515 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
516 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
517 * is UNPREDICTABLE; we choose to NOP as most implementations do).
518 */
519 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
520 .access = PL1_W, .type = ARM_CP_WFI },
521 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
522 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
523 * OMAPCP will override this space.
524 */
525 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
526 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
527 .resetvalue = 0 },
528 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
529 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
530 .resetvalue = 0 },
531 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
532 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
533 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
534 .resetvalue = 0 },
535 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
536 * implementing it as RAZ means the "debug architecture version" bits
537 * will read as a reserved value, which should cause Linux to not try
538 * to use the debug hardware.
539 */
540 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
541 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
542 /* MMU TLB control. Note that the wildcarding means we cover not just
543 * the unified TLB ops but also the dside/iside/inner-shareable variants.
544 */
545 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
546 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
547 .type = ARM_CP_NO_RAW },
548 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
549 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
550 .type = ARM_CP_NO_RAW },
551 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
552 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
553 .type = ARM_CP_NO_RAW },
554 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
555 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
556 .type = ARM_CP_NO_RAW },
557 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
558 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
559 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
560 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
561 REGINFO_SENTINEL
562 };
563
564 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
565 uint64_t value)
566 {
567 uint32_t mask = 0;
568
569 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
570 if (!arm_feature(env, ARM_FEATURE_V8)) {
571 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
572 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
573 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
574 */
575 if (arm_feature(env, ARM_FEATURE_VFP)) {
576 /* VFP coprocessor: cp10 & cp11 [23:20] */
577 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
578
579 if (!arm_feature(env, ARM_FEATURE_NEON)) {
580 /* ASEDIS [31] bit is RAO/WI */
581 value |= (1 << 31);
582 }
583
584 /* VFPv3 and upwards with NEON implement 32 double precision
585 * registers (D0-D31).
586 */
587 if (!arm_feature(env, ARM_FEATURE_NEON) ||
588 !arm_feature(env, ARM_FEATURE_VFP3)) {
589 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
590 value |= (1 << 30);
591 }
592 }
593 value &= mask;
594 }
595 env->cp15.cpacr_el1 = value;
596 }
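/* A worked example (not from the original file), assuming a v7 core with
 * VFPv3 but no Neon:
 *
 *   mask = (1 << 31) | (1 << 30) | (0xf << 20) = 0xC0F00000
 *   write 0x00000000 -> ASEDIS [31] and D32DIS [30] are forced to 1 (RAO/WI),
 *                       so CPACR reads back as 0xC0000000
 *   write 0xFFFFFFFF -> everything outside the mask is dropped,
 *                       so CPACR reads back as 0xC0F00000
 */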
597
598 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri)
599 {
600 if (arm_feature(env, ARM_FEATURE_V8)) {
601 /* Check if CPACR accesses are to be trapped to EL2 */
602 if (arm_current_el(env) == 1 &&
603 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
604 return CP_ACCESS_TRAP_EL2;
605 /* Check if CPACR accesses are to be trapped to EL3 */
606 } else if (arm_current_el(env) < 3 &&
607 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
608 return CP_ACCESS_TRAP_EL3;
609 }
610 }
611
612 return CP_ACCESS_OK;
613 }
614
615 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri)
616 {
617 /* Check if CPTR accesses are set to trap to EL3 */
618 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
619 return CP_ACCESS_TRAP_EL3;
620 }
621
622 return CP_ACCESS_OK;
623 }
624
625 static const ARMCPRegInfo v6_cp_reginfo[] = {
626 /* prefetch by MVA in v6, NOP in v7 */
627 { .name = "MVA_prefetch",
628 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
629 .access = PL1_W, .type = ARM_CP_NOP },
630 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
631 .access = PL0_W, .type = ARM_CP_NOP },
632 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
633 .access = PL0_W, .type = ARM_CP_NOP },
634 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
635 .access = PL0_W, .type = ARM_CP_NOP },
636 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
637 .access = PL1_RW,
638 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
639 offsetof(CPUARMState, cp15.ifar_ns) },
640 .resetvalue = 0, },
641 /* Watchpoint Fault Address Register: should actually only be present
642 * for 1136, 1176, 11MPCore.
643 */
644 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
645 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
646 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
647 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
648 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
649 .resetvalue = 0, .writefn = cpacr_write },
650 REGINFO_SENTINEL
651 };
652
653 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
654 {
655 /* Performance monitor registers user accessibility is controlled
656 * by PMUSERENR.
657 */
658 if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) {
659 return CP_ACCESS_TRAP;
660 }
661 return CP_ACCESS_OK;
662 }
663
664 #ifndef CONFIG_USER_ONLY
665
666 static inline bool arm_ccnt_enabled(CPUARMState *env)
667 {
668 /* This does not support checking the PMCCFILTR_EL0 register */

669
670 if (!(env->cp15.c9_pmcr & PMCRE)) {
671 return false;
672 }
673
674 return true;
675 }
676
677 void pmccntr_sync(CPUARMState *env)
678 {
679 uint64_t temp_ticks;
680
681 temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
682 get_ticks_per_sec(), 1000000);
683
684 if (env->cp15.c9_pmcr & PMCRD) {
685 /* Increment once every 64 processor clock cycles */
686 temp_ticks /= 64;
687 }
688
689 if (arm_ccnt_enabled(env)) {
690 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
691 }
692 }
693
694 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
695 uint64_t value)
696 {
697 pmccntr_sync(env);
698
699 if (value & PMCRC) {
700 /* The counter has been reset */
701 env->cp15.c15_ccnt = 0;
702 }
703
704 /* only the DP, X, D and E bits are writable */
705 env->cp15.c9_pmcr &= ~0x39;
706 env->cp15.c9_pmcr |= (value & 0x39);
707
708 pmccntr_sync(env);
709 }
710
711 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
712 {
713 uint64_t total_ticks;
714
715 if (!arm_ccnt_enabled(env)) {
716 /* Counter is disabled, do not change value */
717 return env->cp15.c15_ccnt;
718 }
719
720 total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
721 get_ticks_per_sec(), 1000000);
722
723 if (env->cp15.c9_pmcr & PMCRD) {
724 /* Increment once every 64 processor clock cycles */
725 total_ticks /= 64;
726 }
727 return total_ticks - env->cp15.c15_ccnt;
728 }
729
730 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
731 uint64_t value)
732 {
733 uint64_t total_ticks;
734
735 if (!arm_ccnt_enabled(env)) {
736 /* Counter is disabled, set the absolute value */
737 env->cp15.c15_ccnt = value;
738 return;
739 }
740
741 total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
742 get_ticks_per_sec(), 1000000);
743
744 if (env->cp15.c9_pmcr & PMCRD) {
745 /* Increment once every 64 processor clock cycles */
746 total_ticks /= 64;
747 }
748 env->cp15.c15_ccnt = total_ticks - value;
749 }
750
751 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
752 uint64_t value)
753 {
754 uint64_t cur_val = pmccntr_read(env, NULL);
755
756 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
757 }
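/* A worked example (not from the original file) of the delta scheme used
 * above: while the counter is enabled, c15_ccnt holds (ticks - PMCCNTR)
 * rather than the counter value itself.
 *
 *   guest writes PMCCNTR = 100 when total_ticks = 1000
 *       -> c15_ccnt = 1000 - 100 = 900
 *   a read at total_ticks = 1250
 *       -> returns 1250 - 900 = 350, i.e. the written 100 plus the
 *          250 ticks that have elapsed since the write
 */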
758
759 #else /* CONFIG_USER_ONLY */
760
761 void pmccntr_sync(CPUARMState *env)
762 {
763 }
764
765 #endif
766
767 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
768 uint64_t value)
769 {
770 pmccntr_sync(env);
771 env->cp15.pmccfiltr_el0 = value & 0x7E000000;
772 pmccntr_sync(env);
773 }
774
775 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
776 uint64_t value)
777 {
778 value &= (1 << 31);
779 env->cp15.c9_pmcnten |= value;
780 }
781
782 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
783 uint64_t value)
784 {
785 value &= (1 << 31);
786 env->cp15.c9_pmcnten &= ~value;
787 }
788
789 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
790 uint64_t value)
791 {
792 env->cp15.c9_pmovsr &= ~value;
793 }
794
795 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
796 uint64_t value)
797 {
798 env->cp15.c9_pmxevtyper = value & 0xff;
799 }
800
801 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
802 uint64_t value)
803 {
804 env->cp15.c9_pmuserenr = value & 1;
805 }
806
807 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
808 uint64_t value)
809 {
810 /* We have no event counters so only the C bit can be changed */
811 value &= (1 << 31);
812 env->cp15.c9_pminten |= value;
813 }
814
815 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
816 uint64_t value)
817 {
818 value &= (1 << 31);
819 env->cp15.c9_pminten &= ~value;
820 }
821
822 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
823 uint64_t value)
824 {
825 /* Note that even though the AArch64 view of this register has bits
826 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
827 * architectural requirements for bits which are RES0 only in some
828 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
829 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
830 */
831 raw_write(env, ri, value & ~0x1FULL);
832 }
833
834 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
835 {
836 /* We only mask off bits that are RES0 both for AArch64 and AArch32.
837 * For bits that vary between AArch32/64, code needs to check the
838 * current execution mode before directly using the feature bit.
839 */
840 uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
841
842 if (!arm_feature(env, ARM_FEATURE_EL2)) {
843 valid_mask &= ~SCR_HCE;
844
845 /* On ARMv7, SMD (or SCD as it is called in v7) is only
846 * supported if EL2 exists. The bit is UNK/SBZP when
847 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
848 * when EL2 is unavailable.
849 * On ARMv8, this bit is always available.
850 */
851 if (arm_feature(env, ARM_FEATURE_V7) &&
852 !arm_feature(env, ARM_FEATURE_V8)) {
853 valid_mask &= ~SCR_SMD;
854 }
855 }
856
857 /* Clear all-context RES0 bits. */
858 value &= valid_mask;
859 raw_write(env, ri, value);
860 }
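/* A worked example (not from the original file): on a core without EL2,
 * valid_mask loses SCR_HCE (and SCR_SMD on v7-without-v8), so
 *
 *   write SCR = SCR_NS | SCR_HCE  ->  only SCR_NS is stored;
 *   HCE subsequently reads as zero, since this implementation treats it as
 *   reserved-zero when EL2 is absent.
 */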
861
862 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
863 {
864 ARMCPU *cpu = arm_env_get_cpu(env);
865
866 /* Acquire the CSSELR index from the CSSELR bank that corresponds to the
867 * security state of this CCSIDR access.
868 */
869 uint32_t index = A32_BANKED_REG_GET(env, csselr,
870 ri->secure & ARM_CP_SECSTATE_S);
871
872 return cpu->ccsidr[index];
873 }
874
875 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
876 uint64_t value)
877 {
878 raw_write(env, ri, value & 0xf);
879 }
880
881 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
882 {
883 CPUState *cs = ENV_GET_CPU(env);
884 uint64_t ret = 0;
885
886 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
887 ret |= CPSR_I;
888 }
889 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
890 ret |= CPSR_F;
891 }
892 /* External aborts are not possible in QEMU so A bit is always clear */
893 return ret;
894 }
895
896 static const ARMCPRegInfo v7_cp_reginfo[] = {
897 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
898 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
899 .access = PL1_W, .type = ARM_CP_NOP },
900 /* Performance monitors are implementation defined in v7,
901 * but with an ARM recommended set of registers, which we
902 * follow (although we don't actually implement any counters)
903 *
904 * Performance registers fall into three categories:
905 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
906 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
907 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
908 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
909 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
910 */
911 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
912 .access = PL0_RW, .type = ARM_CP_ALIAS,
913 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
914 .writefn = pmcntenset_write,
915 .accessfn = pmreg_access,
916 .raw_writefn = raw_write },
917 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
918 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
919 .access = PL0_RW, .accessfn = pmreg_access,
920 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
921 .writefn = pmcntenset_write, .raw_writefn = raw_write },
922 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
923 .access = PL0_RW,
924 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
925 .accessfn = pmreg_access,
926 .writefn = pmcntenclr_write,
927 .type = ARM_CP_ALIAS },
928 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
929 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
930 .access = PL0_RW, .accessfn = pmreg_access,
931 .type = ARM_CP_ALIAS,
932 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
933 .writefn = pmcntenclr_write },
934 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
935 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
936 .accessfn = pmreg_access,
937 .writefn = pmovsr_write,
938 .raw_writefn = raw_write },
939 /* Unimplemented so WI. */
940 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
941 .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
942 /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
943 * We choose to RAZ/WI.
944 */
945 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
946 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
947 .accessfn = pmreg_access },
948 #ifndef CONFIG_USER_ONLY
949 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
950 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
951 .readfn = pmccntr_read, .writefn = pmccntr_write32,
952 .accessfn = pmreg_access },
953 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
954 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
955 .access = PL0_RW, .accessfn = pmreg_access,
956 .type = ARM_CP_IO,
957 .readfn = pmccntr_read, .writefn = pmccntr_write, },
958 #endif
959 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
960 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
961 .writefn = pmccfiltr_write,
962 .access = PL0_RW, .accessfn = pmreg_access,
963 .type = ARM_CP_IO,
964 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
965 .resetvalue = 0, },
966 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
967 .access = PL0_RW,
968 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
969 .accessfn = pmreg_access, .writefn = pmxevtyper_write,
970 .raw_writefn = raw_write },
971 /* Unimplemented, RAZ/WI. */
972 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
973 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
974 .accessfn = pmreg_access },
975 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
976 .access = PL0_R | PL1_RW,
977 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
978 .resetvalue = 0,
979 .writefn = pmuserenr_write, .raw_writefn = raw_write },
980 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
981 .access = PL1_RW,
982 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
983 .resetvalue = 0,
984 .writefn = pmintenset_write, .raw_writefn = raw_write },
985 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
986 .access = PL1_RW, .type = ARM_CP_ALIAS,
987 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
988 .writefn = pmintenclr_write, },
989 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
990 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
991 .access = PL1_RW, .writefn = vbar_write,
992 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
993 offsetof(CPUARMState, cp15.vbar_ns) },
994 .resetvalue = 0 },
995 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
996 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
997 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
998 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
999 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
1000 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
1001 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
1002 offsetof(CPUARMState, cp15.csselr_ns) } },
1003 /* Auxiliary ID register: this actually has an IMPDEF value but for now
1004 * just RAZ for all cores:
1005 */
1006 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
1007 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
1008 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1009 /* Auxiliary fault status registers: these also are IMPDEF, and we
1010 * choose to RAZ/WI for all cores.
1011 */
1012 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
1013 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
1014 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1015 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
1016 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
1017 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1018 /* MAIR can just read-as-written because we don't implement caches
1019 * and so don't need to care about memory attributes.
1020 */
1021 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
1022 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1023 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
1024 .resetvalue = 0 },
1025 /* For non-long-descriptor page tables these are PRRR and NMRR;
1026 * regardless they still act as reads-as-written for QEMU.
1027 */
1028 /* MAIR0/1 are defined separately from their 64-bit counterpart which
1029 * allows them to assign the correct fieldoffset based on the endianness
1030 * handled in the field definitions.
1031 */
1032 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1033 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
1034 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1035 offsetof(CPUARMState, cp15.mair0_ns) },
1036 .resetfn = arm_cp_reset_ignore },
1037 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1038 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
1039 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1040 offsetof(CPUARMState, cp15.mair1_ns) },
1041 .resetfn = arm_cp_reset_ignore },
1042 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1043 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1044 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1045 /* 32 bit ITLB invalidates */
1046 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
1047 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1048 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
1049 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1050 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
1051 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1052 /* 32 bit DTLB invalidates */
1053 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
1054 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1055 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
1056 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1057 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
1058 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1059 /* 32 bit TLB invalidates */
1060 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1061 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1062 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1063 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1064 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1065 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1066 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1067 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
1068 REGINFO_SENTINEL
1069 };
1070
1071 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
1072 /* 32 bit TLB invalidates, Inner Shareable */
1073 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1074 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
1075 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
1076 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
1077 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
1078 .type = ARM_CP_NO_RAW, .access = PL1_W,
1079 .writefn = tlbiasid_is_write },
1080 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
1081 .type = ARM_CP_NO_RAW, .access = PL1_W,
1082 .writefn = tlbimvaa_is_write },
1083 REGINFO_SENTINEL
1084 };
1085
1086 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1087 uint64_t value)
1088 {
1089 value &= 1;
1090 env->teecr = value;
1091 }
1092
1093 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
1094 {
1095 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1096 return CP_ACCESS_TRAP;
1097 }
1098 return CP_ACCESS_OK;
1099 }
1100
1101 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1102 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1103 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1104 .resetvalue = 0,
1105 .writefn = teecr_write },
1106 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1107 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1108 .accessfn = teehbr_access, .resetvalue = 0 },
1109 REGINFO_SENTINEL
1110 };
1111
1112 static const ARMCPRegInfo v6k_cp_reginfo[] = {
1113 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1114 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1115 .access = PL0_RW,
1116 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1117 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1118 .access = PL0_RW,
1119 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1120 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1121 .resetfn = arm_cp_reset_ignore },
1122 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1123 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1124 .access = PL0_R|PL1_W,
1125 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1126 .resetvalue = 0},
1127 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1128 .access = PL0_R|PL1_W,
1129 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1130 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1131 .resetfn = arm_cp_reset_ignore },
1132 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1133 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1134 .access = PL1_RW,
1135 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1136 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1137 .access = PL1_RW,
1138 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1139 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1140 .resetvalue = 0 },
1141 REGINFO_SENTINEL
1142 };
1143
1144 #ifndef CONFIG_USER_ONLY
1145
1146 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
1147 {
1148 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
1149 if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
1150 return CP_ACCESS_TRAP;
1151 }
1152 return CP_ACCESS_OK;
1153 }
1154
1155 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
1156 {
1157 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1158 if (arm_current_el(env) == 0 &&
1159 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1160 return CP_ACCESS_TRAP;
1161 }
1162 return CP_ACCESS_OK;
1163 }
1164
1165 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
1166 {
1167 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1168 * EL0[PV]TEN is zero.
1169 */
1170 if (arm_current_el(env) == 0 &&
1171 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1172 return CP_ACCESS_TRAP;
1173 }
1174 return CP_ACCESS_OK;
1175 }
1176
1177 static CPAccessResult gt_pct_access(CPUARMState *env,
1178 const ARMCPRegInfo *ri)
1179 {
1180 return gt_counter_access(env, GTIMER_PHYS);
1181 }
1182
1183 static CPAccessResult gt_vct_access(CPUARMState *env,
1184 const ARMCPRegInfo *ri)
1185 {
1186 return gt_counter_access(env, GTIMER_VIRT);
1187 }
1188
1189 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
1190 {
1191 return gt_timer_access(env, GTIMER_PHYS);
1192 }
1193
1194 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
1195 {
1196 return gt_timer_access(env, GTIMER_VIRT);
1197 }
1198
1199 static uint64_t gt_get_countervalue(CPUARMState *env)
1200 {
1201 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1202 }
1203
1204 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1205 {
1206 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1207
1208 if (gt->ctl & 1) {
1209 /* Timer enabled: calculate and set current ISTATUS, irq, and
1210 * reset timer to when ISTATUS next has to change
1211 */
1212 uint64_t count = gt_get_countervalue(&cpu->env);
1213 /* Note that this must be unsigned 64 bit arithmetic: */
1214 int istatus = count >= gt->cval;
1215 uint64_t nexttick;
1216
1217 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1218 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1219 (istatus && !(gt->ctl & 2)));
1220 if (istatus) {
1221 /* Next transition is when count rolls back over to zero */
1222 nexttick = UINT64_MAX;
1223 } else {
1224 /* Next transition is when we hit cval */
1225 nexttick = gt->cval;
1226 }
1227 /* Note that the desired next expiry time might be beyond the
1228 * signed-64-bit range of a QEMUTimer -- in this case we just
1229 * set the timer for as far in the future as possible. When the
1230 * timer expires we will reset the timer for any remaining period.
1231 */
1232 if (nexttick > INT64_MAX / GTIMER_SCALE) {
1233 nexttick = INT64_MAX / GTIMER_SCALE;
1234 }
1235 timer_mod(cpu->gt_timer[timeridx], nexttick);
1236 } else {
1237 /* Timer disabled: ISTATUS and timer output always clear */
1238 gt->ctl &= ~4;
1239 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1240 timer_del(cpu->gt_timer[timeridx]);
1241 }
1242 }
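/* A worked example (not from the original file) with made-up numbers:
 * timer enabled (ENABLE=1, IMASK=0), count = 1000, cval = 1500.
 *
 *   istatus  = (1000 >= 1500) = 0   -> ISTATUS cleared, irq line low
 *   nexttick = cval = 1500          -> QEMUTimer armed so that this
 *                                      function re-runs once the count
 *                                      reaches cval
 *
 * When it re-runs, istatus becomes 1, the irq line is raised, and the
 * QEMUTimer is pushed out to the next transition (the 64-bit rollover).
 */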
1243
1244 static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1245 {
1246 ARMCPU *cpu = arm_env_get_cpu(env);
1247 int timeridx = ri->opc1 & 1;
1248
1249 timer_del(cpu->gt_timer[timeridx]);
1250 }
1251
1252 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1253 {
1254 return gt_get_countervalue(env);
1255 }
1256
1257 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1258 uint64_t value)
1259 {
1260 int timeridx = ri->opc1 & 1;
1261
1262 env->cp15.c14_timer[timeridx].cval = value;
1263 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1264 }
1265
1266 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1267 {
1268 int timeridx = ri->crm & 1;
1269
1270 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1271 gt_get_countervalue(env));
1272 }
1273
1274 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1275 uint64_t value)
1276 {
1277 int timeridx = ri->crm & 1;
1278
1279 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
1280 sextract64(value, 0, 32);
1281 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1282 }
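/* A worked example (not from the original file): TVAL is a signed 32-bit
 * downcounter view of cval.
 *
 *   count = 1000, guest writes CNTP_TVAL = 200  ->  cval = 1200
 *   read at count = 1150  ->  (uint32_t)(1200 - 1150) = 50
 *   read at count = 1250  ->  0xFFFFFFCE, i.e. -50: the timer condition
 *                             has already been met
 */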
1283
1284 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1285 uint64_t value)
1286 {
1287 ARMCPU *cpu = arm_env_get_cpu(env);
1288 int timeridx = ri->crm & 1;
1289 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1290
1291 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1292 if ((oldval ^ value) & 1) {
1293 /* Enable toggled */
1294 gt_recalc_timer(cpu, timeridx);
1295 } else if ((oldval ^ value) & 2) {
1296 /* IMASK toggled: don't need to recalculate,
1297 * just set the interrupt line based on ISTATUS
1298 */
1299 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1300 (oldval & 4) && !(value & 2));
1301 }
1302 }
1303
1304 void arm_gt_ptimer_cb(void *opaque)
1305 {
1306 ARMCPU *cpu = opaque;
1307
1308 gt_recalc_timer(cpu, GTIMER_PHYS);
1309 }
1310
1311 void arm_gt_vtimer_cb(void *opaque)
1312 {
1313 ARMCPU *cpu = opaque;
1314
1315 gt_recalc_timer(cpu, GTIMER_VIRT);
1316 }
1317
1318 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1319 /* Note that CNTFRQ is purely reads-as-written for the benefit
1320 * of software; writing it doesn't actually change the timer frequency.
1321 * Our reset value matches the fixed frequency we implement the timer at.
1322 */
1323 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1324 .type = ARM_CP_ALIAS,
1325 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1326 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1327 },
1328 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1329 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1330 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1331 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1332 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1333 },
1334 /* overall control: mostly access permissions */
1335 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1336 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1337 .access = PL1_RW,
1338 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1339 .resetvalue = 0,
1340 },
1341 /* per-timer control */
1342 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1343 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1344 .accessfn = gt_ptimer_access,
1345 .fieldoffset = offsetoflow32(CPUARMState,
1346 cp15.c14_timer[GTIMER_PHYS].ctl),
1347 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1348 },
1349 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1350 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1351 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1352 .accessfn = gt_ptimer_access,
1353 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1354 .resetvalue = 0,
1355 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1356 },
1357 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1358 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1359 .accessfn = gt_vtimer_access,
1360 .fieldoffset = offsetoflow32(CPUARMState,
1361 cp15.c14_timer[GTIMER_VIRT].ctl),
1362 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1363 },
1364 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1365 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1366 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1367 .accessfn = gt_vtimer_access,
1368 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1369 .resetvalue = 0,
1370 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1371 },
1372 /* TimerValue views: a 32 bit downcounting view of the underlying state */
1373 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1374 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1375 .accessfn = gt_ptimer_access,
1376 .readfn = gt_tval_read, .writefn = gt_tval_write,
1377 },
1378 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1379 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1380 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1381 .accessfn = gt_ptimer_access,
1382 .readfn = gt_tval_read, .writefn = gt_tval_write,
1383 },
1384 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1385 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1386 .accessfn = gt_vtimer_access,
1387 .readfn = gt_tval_read, .writefn = gt_tval_write,
1388 },
1389 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1390 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1391 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1392 .accessfn = gt_vtimer_access,
1393 .readfn = gt_tval_read, .writefn = gt_tval_write,
1394 },
1395 /* The counter itself */
1396 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1397 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1398 .accessfn = gt_pct_access,
1399 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1400 },
1401 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1402 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1403 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1404 .accessfn = gt_pct_access,
1405 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
1406 },
1407 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1408 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1409 .accessfn = gt_vct_access,
1410 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1411 },
1412 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1413 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1414 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1415 .accessfn = gt_vct_access,
1416 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
1417 },
1418 /* Comparison value, indicating when the timer goes off */
1419 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1420 .access = PL1_RW | PL0_R,
1421 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1422 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1423 .accessfn = gt_ptimer_access,
1424 .writefn = gt_cval_write, .raw_writefn = raw_write,
1425 },
1426 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1427 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1428 .access = PL1_RW | PL0_R,
1429 .type = ARM_CP_IO,
1430 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1431 .resetvalue = 0, .accessfn = gt_ptimer_access,
1432 .writefn = gt_cval_write, .raw_writefn = raw_write,
1433 },
1434 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1435 .access = PL1_RW | PL0_R,
1436 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1437 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1438 .accessfn = gt_vtimer_access,
1439 .writefn = gt_cval_write, .raw_writefn = raw_write,
1440 },
1441 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1442 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1443 .access = PL1_RW | PL0_R,
1444 .type = ARM_CP_IO,
1445 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1446 .resetvalue = 0, .accessfn = gt_vtimer_access,
1447 .writefn = gt_cval_write, .raw_writefn = raw_write,
1448 },
1449 REGINFO_SENTINEL
1450 };
1451
1452 #else
1453 /* In user-mode none of the generic timer registers are accessible,
1454 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
1455 * so instead just don't register any of them.
1456 */
1457 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1458 REGINFO_SENTINEL
1459 };
1460
1461 #endif
1462
1463 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1464 {
1465 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1466 raw_write(env, ri, value);
1467 } else if (arm_feature(env, ARM_FEATURE_V7)) {
1468 raw_write(env, ri, value & 0xfffff6ff);
1469 } else {
1470 raw_write(env, ri, value & 0xfffff1ff);
1471 }
1472 }
1473
1474 #ifndef CONFIG_USER_ONLY
1475 /* get_phys_addr() isn't present for user-mode-only targets */
1476
1477 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
1478 {
1479 if (ri->opc2 & 4) {
1480 /* Other states are only available with TrustZone; in
1481 * a non-TZ implementation these registers don't exist
1482 * at all, which is an Uncategorized trap. This underdecoding
1483 * is safe because the reginfo is NO_RAW.
1484 */
1485 return CP_ACCESS_TRAP_UNCATEGORIZED;
1486 }
1487 return CP_ACCESS_OK;
1488 }
1489
1490 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
1491 int access_type, ARMMMUIdx mmu_idx)
1492 {
1493 hwaddr phys_addr;
1494 target_ulong page_size;
1495 int prot;
1496 uint32_t fsr;
1497 bool ret;
1498 uint64_t par64;
1499 MemTxAttrs attrs = {};
1500
1501 ret = get_phys_addr(env, value, access_type, mmu_idx,
1502 &phys_addr, &attrs, &prot, &page_size, &fsr);
1503 if (extended_addresses_enabled(env)) {
1504 /* fsr is a DFSR/IFSR value for the long descriptor
1505 * translation table format, but with WnR always clear.
1506 * Convert it to a 64-bit PAR.
1507 */
1508 par64 = (1 << 11); /* LPAE bit always set */
1509 if (!ret) {
1510 par64 |= phys_addr & ~0xfffULL;
1511 if (!attrs.secure) {
1512 par64 |= (1 << 9); /* NS */
1513 }
1514 /* We don't set the ATTR or SH fields in the PAR. */
1515 } else {
1516 par64 |= 1; /* F */
1517 par64 |= (fsr & 0x3f) << 1; /* FS */
1518 /* Note that S2WLK and FSTAGE are always zero, because we don't
1519 * implement virtualization and therefore there can't be a stage 2
1520 * fault.
1521 */
1522 }
1523 } else {
1524 /* fsr is a DFSR/IFSR value for the short descriptor
1525 * translation table format (with WnR always clear).
1526 * Convert it to a 32-bit PAR.
1527 */
1528 if (!ret) {
1529 /* We do not set any attribute bits in the PAR */
1530 if (page_size == (1 << 24)
1531 && arm_feature(env, ARM_FEATURE_V7)) {
1532 par64 = (phys_addr & 0xff000000) | (1 << 1);
1533 } else {
1534 par64 = phys_addr & 0xfffff000;
1535 }
1536 if (!attrs.secure) {
1537 par64 |= (1 << 9); /* NS */
1538 }
1539 } else {
1540 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
1541 ((fsr & 0xf) << 1) | 1;
1542 }
1543 }
1544 return par64;
1545 }
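/* Worked examples (not from the original file) of the 64-bit PAR encoding
 * produced above when the long-descriptor format is in use:
 *
 *   successful walk, PA = 0x80004000, non-secure:
 *       par64 = 0x80004000 | (1 << 11) | (1 << 9)    PA | LPAE | NS
 *   faulting walk, long-descriptor fsr = 0x05 (level 1 translation fault):
 *       par64 = (1 << 11) | (0x05 << 1) | 1          LPAE | FS | F
 */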
1546
1547 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1548 {
1549 int access_type = ri->opc2 & 1;
1550 uint64_t par64;
1551 ARMMMUIdx mmu_idx;
1552 int el = arm_current_el(env);
1553 bool secure = arm_is_secure_below_el3(env);
1554
1555 switch (ri->opc2 & 6) {
1556 case 0:
1557 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
1558 switch (el) {
1559 case 3:
1560 mmu_idx = ARMMMUIdx_S1E3;
1561 break;
1562 case 2:
1563 mmu_idx = ARMMMUIdx_S1NSE1;
1564 break;
1565 case 1:
1566 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
1567 break;
1568 default:
1569 g_assert_not_reached();
1570 }
1571 break;
1572 case 2:
1573 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
1574 switch (el) {
1575 case 3:
1576 mmu_idx = ARMMMUIdx_S1SE0;
1577 break;
1578 case 2:
1579 mmu_idx = ARMMMUIdx_S1NSE0;
1580 break;
1581 case 1:
1582 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
1583 break;
1584 default:
1585 g_assert_not_reached();
1586 }
1587 break;
1588 case 4:
1589 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
1590 mmu_idx = ARMMMUIdx_S12NSE1;
1591 break;
1592 case 6:
1593 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
1594 mmu_idx = ARMMMUIdx_S12NSE0;
1595 break;
1596 default:
1597 g_assert_not_reached();
1598 }
1599
1600 par64 = do_ats_write(env, value, access_type, mmu_idx);
1601
1602 A32_BANKED_CURRENT_REG_SET(env, par, par64);
1603 }
1604
1605 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
1606 uint64_t value)
1607 {
1608 int access_type = ri->opc2 & 1;
1609 ARMMMUIdx mmu_idx;
1610 int secure = arm_is_secure_below_el3(env);
1611
1612 switch (ri->opc2 & 6) {
1613 case 0:
1614 switch (ri->opc1) {
1615 case 0: /* AT S1E1R, AT S1E1W */
1616 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
1617 break;
1618 case 4: /* AT S1E2R, AT S1E2W */
1619 mmu_idx = ARMMMUIdx_S1E2;
1620 break;
1621 case 6: /* AT S1E3R, AT S1E3W */
1622 mmu_idx = ARMMMUIdx_S1E3;
1623 break;
1624 default:
1625 g_assert_not_reached();
1626 }
1627 break;
1628 case 2: /* AT S1E0R, AT S1E0W */
1629 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
1630 break;
1631 case 4: /* AT S12E1R, AT S12E1W */
1632 mmu_idx = ARMMMUIdx_S12NSE1;
1633 break;
1634 case 6: /* AT S12E0R, AT S12E0W */
1635 mmu_idx = ARMMMUIdx_S12NSE0;
1636 break;
1637 default:
1638 g_assert_not_reached();
1639 }
1640
1641 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
1642 }
1643 #endif
1644
1645 static const ARMCPRegInfo vapa_cp_reginfo[] = {
1646 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
1647 .access = PL1_RW, .resetvalue = 0,
1648 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
1649 offsetoflow32(CPUARMState, cp15.par_ns) },
1650 .writefn = par_write },
1651 #ifndef CONFIG_USER_ONLY
1652 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
1653 .access = PL1_W, .accessfn = ats_access,
1654 .writefn = ats_write, .type = ARM_CP_NO_RAW },
1655 #endif
1656 REGINFO_SENTINEL
1657 };
1658
1659 /* Return basic MPU access permission bits. */
1660 static uint32_t simple_mpu_ap_bits(uint32_t val)
1661 {
1662 uint32_t ret;
1663 uint32_t mask;
1664 int i;
1665 ret = 0;
1666 mask = 3;
1667 for (i = 0; i < 16; i += 2) {
1668 ret |= (val >> i) & mask;
1669 mask <<= 2;
1670 }
1671 return ret;
1672 }
1673
1674 /* Pad basic MPU access permission bits to extended format. */
1675 static uint32_t extended_mpu_ap_bits(uint32_t val)
1676 {
1677 uint32_t ret;
1678 uint32_t mask;
1679 int i;
1680 ret = 0;
1681 mask = 3;
1682 for (i = 0; i < 16; i += 2) {
1683 ret |= (val & mask) << i;
1684 mask <<= 2;
1685 }
1686 return ret;
1687 }
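/* Worked example for the two helpers above: the "simple" layout packs a
 * 2-bit AP field per region into bits [15:0], while the "extended" layout
 * gives each region a 4-bit field (whose top two bits these helpers leave
 * clear or drop). For instance, with region 0 having AP=2 and region 1
 * having AP=3:
 *   extended_mpu_ap_bits(0x000e) == 0x0032
 *   simple_mpu_ap_bits(0x0032)   == 0x000e
 */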
1688
1689 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1690 uint64_t value)
1691 {
1692 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
1693 }
1694
1695 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1696 {
1697 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
1698 }
1699
1700 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1701 uint64_t value)
1702 {
1703 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
1704 }
1705
1706 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1707 {
1708 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
1709 }
1710
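/* The PMSAv7 DRBAR/DRSR/DRACR registers are backed by dynamically sized
 * arrays (one entry per region, see pmsav7_dregion): the reginfo
 * fieldoffset holds a pointer to the array and cp15.c6_rgnr (RGNR)
 * selects which region's entry is read or written. A NULL pointer means
 * the CPU has no MPU regions, so accesses are ignored or read as zero.
 */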
1711 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
1712 {
1713 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
1714
1715 if (!u32p) {
1716 return 0;
1717 }
1718
1719 u32p += env->cp15.c6_rgnr;
1720 return *u32p;
1721 }
1722
1723 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
1724 uint64_t value)
1725 {
1726 ARMCPU *cpu = arm_env_get_cpu(env);
1727 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
1728
1729 if (!u32p) {
1730 return;
1731 }
1732
1733 u32p += env->cp15.c6_rgnr;
1734 tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
1735 *u32p = value;
1736 }
1737
1738 static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1739 {
1740 ARMCPU *cpu = arm_env_get_cpu(env);
1741 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
1742
1743 if (!u32p) {
1744 return;
1745 }
1746
1747 memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
1748 }
1749
1750 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751 uint64_t value)
1752 {
1753 ARMCPU *cpu = arm_env_get_cpu(env);
1754 uint32_t nrgs = cpu->pmsav7_dregion;
1755
1756 if (value >= nrgs) {
1757 qemu_log_mask(LOG_GUEST_ERROR,
1758 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
1759 " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
1760 return;
1761 }
1762
1763 raw_write(env, ri, value);
1764 }
1765
1766 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
1767 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
1768 .access = PL1_RW, .type = ARM_CP_NO_RAW,
1769 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
1770 .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
1771 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
1772 .access = PL1_RW, .type = ARM_CP_NO_RAW,
1773 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
1774 .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
1775 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
1776 .access = PL1_RW, .type = ARM_CP_NO_RAW,
1777 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
1778 .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
1779 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
1780 .access = PL1_RW,
1781 .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
1782 .writefn = pmsav7_rgnr_write },
1783 REGINFO_SENTINEL
1784 };
1785
1786 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
1787 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
1788 .access = PL1_RW, .type = ARM_CP_ALIAS,
1789 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
1790 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
1791 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
1792 .access = PL1_RW, .type = ARM_CP_ALIAS,
1793 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
1794 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
1795 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
1796 .access = PL1_RW,
1797 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
1798 .resetvalue = 0, },
1799 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
1800 .access = PL1_RW,
1801 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
1802 .resetvalue = 0, },
1803 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
1804 .access = PL1_RW,
1805 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
1806 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
1807 .access = PL1_RW,
1808 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
1809 /* Protection region base and size registers */
1810 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
1811 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1812 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
1813 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
1814 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1815 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
1816 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
1817 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1818 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
1819 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
1820 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1821 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
1822 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
1823 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1824 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
1825 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
1826 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1827 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
1828 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
1829 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1830 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
1831 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
1832 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1833 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
1834 REGINFO_SENTINEL
1835 };
1836
1837 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
1838 uint64_t value)
1839 {
1840 TCR *tcr = raw_ptr(env, ri);
1841 int maskshift = extract32(value, 0, 3);
1842
1843 if (!arm_feature(env, ARM_FEATURE_V8)) {
1844 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
1845 /* Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
1846 * using the Long-descriptor translation table format */
1847 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
1848 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
1849 /* In an implementation that includes the Security Extensions
1850 * TTBCR has additional fields PD0 [4] and PD1 [5] for
1851 * Short-descriptor translation table format.
1852 */
1853 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
1854 } else {
1855 value &= TTBCR_N;
1856 }
1857 }
1858
1859 /* Update the masks corresponding to the TCR bank being written.
1860 * Note that we always calculate mask and base_mask, but
1861 * they are only used for short-descriptor tables (i.e. if EAE is 0);
1862 * for long-descriptor tables the TCR fields are used differently
1863 * and the mask and base_mask values are meaningless.
1864 */
1865 tcr->raw_tcr = value;
1866 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
1867 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
1868 }
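/* Example of the mask arithmetic above: for TTBCR.N == 2 this gives
 * mask = ~(0xffffffff >> 2) = 0xc0000000 and
 * base_mask = ~(0x3fff >> 2) = 0xfffff000, i.e. the top N bits of the
 * virtual address select TTBR0 vs TTBR1 and the TTBR0 table base must be
 * aligned to the correspondingly smaller table size. These values are
 * only consumed by the short-descriptor table walk later in this file.
 */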
1869
1870 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1871 uint64_t value)
1872 {
1873 ARMCPU *cpu = arm_env_get_cpu(env);
1874
1875 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1876 /* With LPAE the TTBCR could result in a change of ASID
1877 * via the TTBCR.A1 bit, so do a TLB flush.
1878 */
1879 tlb_flush(CPU(cpu), 1);
1880 }
1881 vmsa_ttbcr_raw_write(env, ri, value);
1882 }
1883
1884 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1885 {
1886 TCR *tcr = raw_ptr(env, ri);
1887
1888 /* Reset both the TCR as well as the masks corresponding to the bank of
1889 * the TCR being reset.
1890 */
1891 tcr->raw_tcr = 0;
1892 tcr->mask = 0;
1893 tcr->base_mask = 0xffffc000u;
1894 }
1895
1896 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1897 uint64_t value)
1898 {
1899 ARMCPU *cpu = arm_env_get_cpu(env);
1900 TCR *tcr = raw_ptr(env, ri);
1901
1902 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
1903 tlb_flush(CPU(cpu), 1);
1904 tcr->raw_tcr = value;
1905 }
1906
1907 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1908 uint64_t value)
1909 {
1910 /* 64 bit accesses to the TTBRs can change the ASID and so we
1911 * must flush the TLB.
1912 */
1913 if (cpreg_field_is_64bit(ri)) {
1914 ARMCPU *cpu = arm_env_get_cpu(env);
1915
1916 tlb_flush(CPU(cpu), 1);
1917 }
1918 raw_write(env, ri, value);
1919 }
1920
1921 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
1922 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
1923 .access = PL1_RW, .type = ARM_CP_ALIAS,
1924 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
1925 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
1926 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
1927 .access = PL1_RW, .resetvalue = 0,
1928 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
1929 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
1930 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
1931 .access = PL1_RW, .resetvalue = 0,
1932 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
1933 offsetof(CPUARMState, cp15.dfar_ns) } },
1934 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
1935 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
1936 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
1937 .resetvalue = 0, },
1938 REGINFO_SENTINEL
1939 };
1940
1941 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
1942 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
1943 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
1944 .access = PL1_RW,
1945 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
1946 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
1947 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
1948 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
1949 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
1950 offsetof(CPUARMState, cp15.ttbr0_ns) } },
1951 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
1952 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
1953 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
1954 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
1955 offsetof(CPUARMState, cp15.ttbr1_ns) } },
1956 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
1957 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
1958 .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
1959 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
1960 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
1961 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
1962 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
1963 .raw_writefn = vmsa_ttbcr_raw_write,
1964 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
1965 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
1966 REGINFO_SENTINEL
1967 };
1968
1969 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
1970 uint64_t value)
1971 {
1972 env->cp15.c15_ticonfig = value & 0xe7;
1973 /* The OS_TYPE bit in this register changes the reported CPUID! */
1974 env->cp15.c0_cpuid = (value & (1 << 5)) ?
1975 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1976 }
1977
1978 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1979 uint64_t value)
1980 {
1981 env->cp15.c15_threadid = value & 0xffff;
1982 }
1983
1984 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
1985 uint64_t value)
1986 {
1987 /* Wait-for-interrupt (deprecated) */
1988 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
1989 }
1990
1991 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
1992 uint64_t value)
1993 {
1994 /* On OMAP there are registers indicating the max/min index of dcache lines
1995 * containing a dirty line; cache flush operations have to reset these.
1996 */
1997 env->cp15.c15_i_max = 0x000;
1998 env->cp15.c15_i_min = 0xff0;
1999 }
2000
2001 static const ARMCPRegInfo omap_cp_reginfo[] = {
2002 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2003 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2004 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2005 .resetvalue = 0, },
2006 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2007 .access = PL1_RW, .type = ARM_CP_NOP },
2008 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2009 .access = PL1_RW,
2010 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2011 .writefn = omap_ticonfig_write },
2012 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2013 .access = PL1_RW,
2014 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2015 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2016 .access = PL1_RW, .resetvalue = 0xff0,
2017 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2018 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2019 .access = PL1_RW,
2020 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2021 .writefn = omap_threadid_write },
2022 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2023 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2024 .type = ARM_CP_NO_RAW,
2025 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2026 /* TODO: Peripheral port remap register:
2027 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2028 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2029 * when MMU is off.
2030 */
2031 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2032 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2033 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2034 .writefn = omap_cachemaint_write },
2035 { .name = "C9", .cp = 15, .crn = 9,
2036 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2037 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2038 REGINFO_SENTINEL
2039 };
2040
2041 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2042 uint64_t value)
2043 {
2044 env->cp15.c15_cpar = value & 0x3fff;
2045 }
2046
2047 static const ARMCPRegInfo xscale_cp_reginfo[] = {
2048 { .name = "XSCALE_CPAR",
2049 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2050 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2051 .writefn = xscale_cpar_write, },
2052 { .name = "XSCALE_AUXCR",
2053 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2054 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2055 .resetvalue = 0, },
2056 /* XScale-specific cache lockdown: since we have no cache we NOP these
2057 * and hope the guest does not really rely on cache behaviour.
2058 */
2059 { .name = "XSCALE_LOCK_ICACHE_LINE",
2060 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2061 .access = PL1_W, .type = ARM_CP_NOP },
2062 { .name = "XSCALE_UNLOCK_ICACHE",
2063 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2064 .access = PL1_W, .type = ARM_CP_NOP },
2065 { .name = "XSCALE_DCACHE_LOCK",
2066 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2067 .access = PL1_RW, .type = ARM_CP_NOP },
2068 { .name = "XSCALE_UNLOCK_DCACHE",
2069 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2070 .access = PL1_W, .type = ARM_CP_NOP },
2071 REGINFO_SENTINEL
2072 };
2073
2074 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2075 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2076 * implementation of this implementation-defined space.
2077 * Ideally this should eventually disappear in favour of actually
2078 * implementing the correct behaviour for all cores.
2079 */
2080 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2081 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2082 .access = PL1_RW,
2083 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2084 .resetvalue = 0 },
2085 REGINFO_SENTINEL
2086 };
2087
2088 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2089 /* Cache status: RAZ because we have no cache so it's always clean */
2090 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2091 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2092 .resetvalue = 0 },
2093 REGINFO_SENTINEL
2094 };
2095
2096 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2097 /* We never have a block transfer operation in progress */
2098 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2099 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2100 .resetvalue = 0 },
2101 /* The cache ops themselves: these all NOP for QEMU */
2102 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2103 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2104 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2105 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2106 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2107 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2108 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2109 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2110 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2111 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2112 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2113 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2114 REGINFO_SENTINEL
2115 };
2116
2117 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2118 /* The cache test-and-clean instructions always return (1 << 30)
2119 * to indicate that there are no dirty cache lines.
2120 */
2121 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2122 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2123 .resetvalue = (1 << 30) },
2124 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2125 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2126 .resetvalue = (1 << 30) },
2127 REGINFO_SENTINEL
2128 };
2129
2130 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2131 /* Ignore ReadBuffer accesses */
2132 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2133 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2134 .access = PL1_RW, .resetvalue = 0,
2135 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2136 REGINFO_SENTINEL
2137 };
2138
2139 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2140 {
2141 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2142 uint64_t mpidr = cpu->mp_affinity;
2143
2144 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2145 mpidr |= (1U << 31);
2146 /* Cores which are uniprocessor (non-coherent)
2147 * but still implement the MP extensions also
2148 * set bit 30 (for instance, the Cortex-R5).
2149 */
2150 if (cpu->mp_is_up) {
2151 mpidr |= (1u << 30);
2152 }
2153 }
2154 return mpidr;
2155 }
2156
2157 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2158 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2159 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2160 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2161 REGINFO_SENTINEL
2162 };
2163
2164 static const ARMCPRegInfo lpae_cp_reginfo[] = {
2165 /* NOP AMAIR0/1 */
2166 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2167 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2168 .access = PL1_RW, .type = ARM_CP_CONST,
2169 .resetvalue = 0 },
2170 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2171 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2172 .access = PL1_RW, .type = ARM_CP_CONST,
2173 .resetvalue = 0 },
2174 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2175 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2176 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2177 offsetof(CPUARMState, cp15.par_ns)} },
2178 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2179 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2180 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2181 offsetof(CPUARMState, cp15.ttbr0_ns) },
2182 .writefn = vmsa_ttbr_write, },
2183 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2184 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2185 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2186 offsetof(CPUARMState, cp15.ttbr1_ns) },
2187 .writefn = vmsa_ttbr_write, },
2188 REGINFO_SENTINEL
2189 };
2190
2191 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2192 {
2193 return vfp_get_fpcr(env);
2194 }
2195
2196 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2197 uint64_t value)
2198 {
2199 vfp_set_fpcr(env, value);
2200 }
2201
2202 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2203 {
2204 return vfp_get_fpsr(env);
2205 }
2206
2207 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2208 uint64_t value)
2209 {
2210 vfp_set_fpsr(env, value);
2211 }
2212
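/* MSR/MRS access to the DAIF flags from EL0 is only permitted when
 * SCTLR_EL1.UMA (User Mask Access) is set; otherwise the access traps.
 */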
2213 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
2214 {
2215 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2216 return CP_ACCESS_TRAP;
2217 }
2218 return CP_ACCESS_OK;
2219 }
2220
2221 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2222 uint64_t value)
2223 {
2224 env->daif = value & PSTATE_DAIF;
2225 }
2226
2227 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2228 const ARMCPRegInfo *ri)
2229 {
2230 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2231 * SCTLR_EL1.UCI is set.
2232 */
2233 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2234 return CP_ACCESS_TRAP;
2235 }
2236 return CP_ACCESS_OK;
2237 }
2238
2239 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2240 * Page D4-1736 (DDI0487A.b)
2241 */
2242
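/* For the by-VA invalidates, the Xt value carries VA[55:12] in bits
 * [43:0]; shifting left by 12 and sign-extending from bit 55 (via
 * sextract64) reconstructs the page-aligned 64-bit virtual address.
 */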
2243 static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
2244 uint64_t value)
2245 {
2246 /* Invalidate by VA (AArch64 version) */
2247 ARMCPU *cpu = arm_env_get_cpu(env);
2248 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2249
2250 tlb_flush_page(CPU(cpu), pageaddr);
2251 }
2252
2253 static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
2254 uint64_t value)
2255 {
2256 /* Invalidate by VA, all ASIDs (AArch64 version) */
2257 ARMCPU *cpu = arm_env_get_cpu(env);
2258 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2259
2260 tlb_flush_page(CPU(cpu), pageaddr);
2261 }
2262
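/* QEMU's TLB does not record the ASID, so invalidate-by-ASID is
 * implemented as a full flush; the second tlb_flush() argument (flush
 * global entries as well) is only set for the ASID == 0 case.
 */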
2263 static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2264 uint64_t value)
2265 {
2266 /* Invalidate by ASID (AArch64 version) */
2267 ARMCPU *cpu = arm_env_get_cpu(env);
2268 int asid = extract64(value, 48, 16);
2269 tlb_flush(CPU(cpu), asid == 0);
2270 }
2271
2272 static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2273 uint64_t value)
2274 {
2275 CPUState *other_cs;
2276 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2277
2278 CPU_FOREACH(other_cs) {
2279 tlb_flush_page(other_cs, pageaddr);
2280 }
2281 }
2282
2283 static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2284 uint64_t value)
2285 {
2286 CPUState *other_cs;
2287 uint64_t pageaddr = sextract64(value << 12, 0, 56);
2288
2289 CPU_FOREACH(other_cs) {
2290 tlb_flush_page(other_cs, pageaddr);
2291 }
2292 }
2293
2294 static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2295 uint64_t value)
2296 {
2297 CPUState *other_cs;
2298 int asid = extract64(value, 48, 16);
2299
2300 CPU_FOREACH(other_cs) {
2301 tlb_flush(other_cs, asid == 0);
2302 }
2303 }
2304
2305 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
2306 {
2307 /* We don't implement EL2, so the only control on DC ZVA is the
2308 * bit in the SCTLR which can prohibit access for EL0.
2309 */
2310 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
2311 return CP_ACCESS_TRAP;
2312 }
2313 return CP_ACCESS_OK;
2314 }
2315
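/* DCZID_EL0 layout: bits [3:0] (BS) give the DC ZVA block size as
 * log2(words), taken from cpu->dcz_blocksize; bit 4 (DZP) reads as 1
 * when use of DC ZVA is prohibited.
 */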
2316 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
2317 {
2318 ARMCPU *cpu = arm_env_get_cpu(env);
2319 int dzp_bit = 1 << 4;
2320
2321 /* DZP indicates whether DC ZVA access is allowed */
2322 if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) {
2323 dzp_bit = 0;
2324 }
2325 return cpu->dcz_blocksize | dzp_bit;
2326 }
2327
2328 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
2329 {
2330 if (!(env->pstate & PSTATE_SP)) {
2331 /* Access to SP_EL0 is undefined if it's being used as
2332 * the stack pointer.
2333 */
2334 return CP_ACCESS_TRAP_UNCATEGORIZED;
2335 }
2336 return CP_ACCESS_OK;
2337 }
2338
2339 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
2340 {
2341 return env->pstate & PSTATE_SP;
2342 }
2343
2344 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
2345 {
2346 update_spsel(env, val);
2347 }
2348
2349 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2350 uint64_t value)
2351 {
2352 ARMCPU *cpu = arm_env_get_cpu(env);
2353
2354 if (raw_read(env, ri) == value) {
2355 /* Skip the TLB flush if nothing actually changed; Linux likes
2356 * to do a lot of pointless SCTLR writes.
2357 */
2358 return;
2359 }
2360
2361 raw_write(env, ri, value);
2362 /* ??? Lots of these bits are not implemented. */
2363 /* This may enable/disable the MMU, so do a TLB flush. */
2364 tlb_flush(CPU(cpu), 1);
2365 }
2366
2367 static const ARMCPRegInfo v8_cp_reginfo[] = {
2368 /* Minimal set of EL0-visible registers. This will need to be expanded
2369 * significantly for system emulation of AArch64 CPUs.
2370 */
2371 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
2372 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
2373 .access = PL0_RW, .type = ARM_CP_NZCV },
2374 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
2375 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
2376 .type = ARM_CP_NO_RAW,
2377 .access = PL0_RW, .accessfn = aa64_daif_access,
2378 .fieldoffset = offsetof(CPUARMState, daif),
2379 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
2380 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
2381 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
2382 .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
2383 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
2384 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
2385 .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
2386 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
2387 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
2388 .access = PL0_R, .type = ARM_CP_NO_RAW,
2389 .readfn = aa64_dczid_read },
2390 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
2391 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
2392 .access = PL0_W, .type = ARM_CP_DC_ZVA,
2393 #ifndef CONFIG_USER_ONLY
2394 /* Avoid overhead of an access check that always passes in user-mode */
2395 .accessfn = aa64_zva_access,
2396 #endif
2397 },
2398 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
2399 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
2400 .access = PL1_R, .type = ARM_CP_CURRENTEL },
2401 /* Cache ops: all NOPs since we don't emulate caches */
2402 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
2403 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
2404 .access = PL1_W, .type = ARM_CP_NOP },
2405 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
2406 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
2407 .access = PL1_W, .type = ARM_CP_NOP },
2408 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
2409 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
2410 .access = PL0_W, .type = ARM_CP_NOP,
2411 .accessfn = aa64_cacheop_access },
2412 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
2413 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
2414 .access = PL1_W, .type = ARM_CP_NOP },
2415 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
2416 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
2417 .access = PL1_W, .type = ARM_CP_NOP },
2418 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
2419 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
2420 .access = PL0_W, .type = ARM_CP_NOP,
2421 .accessfn = aa64_cacheop_access },
2422 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
2423 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
2424 .access = PL1_W, .type = ARM_CP_NOP },
2425 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
2426 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
2427 .access = PL0_W, .type = ARM_CP_NOP,
2428 .accessfn = aa64_cacheop_access },
2429 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
2430 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
2431 .access = PL0_W, .type = ARM_CP_NOP,
2432 .accessfn = aa64_cacheop_access },
2433 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
2434 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
2435 .access = PL1_W, .type = ARM_CP_NOP },
2436 /* TLBI operations */
2437 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
2438 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
2439 .access = PL2_W, .type = ARM_CP_NO_RAW,
2440 .writefn = tlbiall_write },
2441 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
2442 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
2443 .access = PL2_W, .type = ARM_CP_NO_RAW,
2444 .writefn = tlbiall_is_write },
2445 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
2446 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2447 .access = PL1_W, .type = ARM_CP_NO_RAW,
2448 .writefn = tlbiall_is_write },
2449 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
2450 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2451 .access = PL1_W, .type = ARM_CP_NO_RAW,
2452 .writefn = tlbi_aa64_va_is_write },
2453 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
2454 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2455 .access = PL1_W, .type = ARM_CP_NO_RAW,
2456 .writefn = tlbi_aa64_asid_is_write },
2457 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
2458 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2459 .access = PL1_W, .type = ARM_CP_NO_RAW,
2460 .writefn = tlbi_aa64_vaa_is_write },
2461 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
2462 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
2463 .access = PL1_W, .type = ARM_CP_NO_RAW,
2464 .writefn = tlbi_aa64_va_is_write },
2465 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
2466 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
2467 .access = PL1_W, .type = ARM_CP_NO_RAW,
2468 .writefn = tlbi_aa64_vaa_is_write },
2469 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
2470 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2471 .access = PL1_W, .type = ARM_CP_NO_RAW,
2472 .writefn = tlbiall_write },
2473 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
2474 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2475 .access = PL1_W, .type = ARM_CP_NO_RAW,
2476 .writefn = tlbi_aa64_va_write },
2477 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
2478 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2479 .access = PL1_W, .type = ARM_CP_NO_RAW,
2480 .writefn = tlbi_aa64_asid_write },
2481 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
2482 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2483 .access = PL1_W, .type = ARM_CP_NO_RAW,
2484 .writefn = tlbi_aa64_vaa_write },
2485 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
2486 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
2487 .access = PL1_W, .type = ARM_CP_NO_RAW,
2488 .writefn = tlbi_aa64_va_write },
2489 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
2490 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
2491 .access = PL1_W, .type = ARM_CP_NO_RAW,
2492 .writefn = tlbi_aa64_vaa_write },
2493 #ifndef CONFIG_USER_ONLY
2494 /* 64 bit address translation operations */
2495 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
2496 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
2497 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
2498 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
2499 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
2500 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
2501 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
2502 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
2503 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
2504 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
2505 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
2506 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
2507 #endif
2508 /* TLB invalidate last level of translation table walk */
2509 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
2510 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
2511 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
2512 .type = ARM_CP_NO_RAW, .access = PL1_W,
2513 .writefn = tlbimvaa_is_write },
2514 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
2515 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2516 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
2517 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
2518 /* 32 bit cache operations */
2519 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
2520 .type = ARM_CP_NOP, .access = PL1_W },
2521 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
2522 .type = ARM_CP_NOP, .access = PL1_W },
2523 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
2524 .type = ARM_CP_NOP, .access = PL1_W },
2525 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
2526 .type = ARM_CP_NOP, .access = PL1_W },
2527 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
2528 .type = ARM_CP_NOP, .access = PL1_W },
2529 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
2530 .type = ARM_CP_NOP, .access = PL1_W },
2531 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
2532 .type = ARM_CP_NOP, .access = PL1_W },
2533 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
2534 .type = ARM_CP_NOP, .access = PL1_W },
2535 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
2536 .type = ARM_CP_NOP, .access = PL1_W },
2537 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
2538 .type = ARM_CP_NOP, .access = PL1_W },
2539 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
2540 .type = ARM_CP_NOP, .access = PL1_W },
2541 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
2542 .type = ARM_CP_NOP, .access = PL1_W },
2543 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
2544 .type = ARM_CP_NOP, .access = PL1_W },
2545 /* MMU Domain access control / MPU write buffer control */
2546 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
2547 .access = PL1_RW, .resetvalue = 0,
2548 .writefn = dacr_write, .raw_writefn = raw_write,
2549 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
2550 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
2551 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
2552 .type = ARM_CP_ALIAS,
2553 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
2554 .access = PL1_RW,
2555 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
2556 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
2557 .type = ARM_CP_ALIAS,
2558 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
2559 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[1]) },
2560 /* We rely on the access checks not allowing the guest to write to the
2561 * state field when SPSel indicates that it's being used as the stack
2562 * pointer.
2563 */
2564 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
2565 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
2566 .access = PL1_RW, .accessfn = sp_el0_access,
2567 .type = ARM_CP_ALIAS,
2568 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
2569 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
2570 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
2571 .access = PL2_RW, .type = ARM_CP_ALIAS,
2572 .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
2573 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
2574 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
2575 .type = ARM_CP_NO_RAW,
2576 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
2577 REGINFO_SENTINEL
2578 };
2579
2580 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
2581 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
2582 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
2583 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
2584 .access = PL2_RW,
2585 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
2586 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
2587 .type = ARM_CP_NO_RAW,
2588 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
2589 .access = PL2_RW,
2590 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
2591 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
2592 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
2593 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2594 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
2595 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
2596 .access = PL2_RW, .type = ARM_CP_CONST,
2597 .resetvalue = 0 },
2598 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
2599 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
2600 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2601 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
2602 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
2603 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2604 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
2605 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
2606 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2607 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
2608 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
2609 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2610 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
2611 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
2612 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2613 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
2614 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
2615 .resetvalue = 0 },
2616 REGINFO_SENTINEL
2617 };
2618
2619 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2620 {
2621 ARMCPU *cpu = arm_env_get_cpu(env);
2622 uint64_t valid_mask = HCR_MASK;
2623
2624 if (arm_feature(env, ARM_FEATURE_EL3)) {
2625 valid_mask &= ~HCR_HCD;
2626 } else {
2627 valid_mask &= ~HCR_TSC;
2628 }
2629
2630 /* Clear RES0 bits. */
2631 value &= valid_mask;
2632
2633 /* These bits change the MMU setup:
2634 * HCR_VM enables stage 2 translation
2635 * HCR_PTW forbids certain page-table setups
2636 * HCR_DC Disables stage1 and enables stage2 translation
2637 */
2638 if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
2639 tlb_flush(CPU(cpu), 1);
2640 }
2641 raw_write(env, ri, value);
2642 }
2643
2644 static const ARMCPRegInfo el2_cp_reginfo[] = {
2645 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
2646 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
2647 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
2648 .writefn = hcr_write },
2649 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
2650 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
2651 .access = PL2_RW, .resetvalue = 0,
2652 .writefn = dacr_write, .raw_writefn = raw_write,
2653 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
2654 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
2655 .type = ARM_CP_ALIAS,
2656 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
2657 .access = PL2_RW,
2658 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
2659 { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
2660 .type = ARM_CP_ALIAS,
2661 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
2662 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
2663 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
2664 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
2665 .access = PL2_RW, .resetvalue = 0,
2666 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
2667 { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
2668 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
2669 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
2670 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
2671 .type = ARM_CP_ALIAS,
2672 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
2673 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
2674 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
2675 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
2676 .access = PL2_RW, .writefn = vbar_write,
2677 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
2678 .resetvalue = 0 },
2679 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
2680 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
2681 .access = PL3_RW, .type = ARM_CP_ALIAS,
2682 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
2683 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
2684 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
2685 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
2686 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
2687 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
2688 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
2689 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
2690 .resetvalue = 0 },
2691 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
2692 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
2693 .access = PL2_RW, .type = ARM_CP_ALIAS,
2694 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
2695 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
2696 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
2697 .access = PL2_RW, .writefn = vmsa_tcr_el1_write,
2698 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2699 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
2700 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
2701 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
2702 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
2703 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
2704 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
2705 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
2706 .access = PL2_RW, .resetvalue = 0,
2707 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
2708 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
2709 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
2710 .access = PL2_RW, .resetvalue = 0,
2711 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
2712 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
2713 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2714 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
2715 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
2716 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
2717 .type = ARM_CP_NO_RAW, .access = PL2_W,
2718 .writefn = tlbiall_write },
2719 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
2720 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
2721 .type = ARM_CP_NO_RAW, .access = PL2_W,
2722 .writefn = tlbi_aa64_vaa_write },
2723 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
2724 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
2725 .type = ARM_CP_NO_RAW, .access = PL2_W,
2726 .writefn = tlbi_aa64_vaa_write },
2727 REGINFO_SENTINEL
2728 };
2729
2730 static const ARMCPRegInfo el3_cp_reginfo[] = {
2731 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
2732 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
2733 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
2734 .resetvalue = 0, .writefn = scr_write },
2735 { .name = "SCR", .type = ARM_CP_ALIAS,
2736 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
2737 .access = PL3_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
2738 .writefn = scr_write },
2739 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
2740 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
2741 .access = PL3_RW, .resetvalue = 0,
2742 .fieldoffset = offsetof(CPUARMState, cp15.sder) },
2743 { .name = "SDER",
2744 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
2745 .access = PL3_RW, .resetvalue = 0,
2746 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
2747 /* TODO: Implement NSACR trapping of secure EL1 accesses to EL3 */
2748 { .name = "NSACR", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
2749 .access = PL3_W | PL1_R, .resetvalue = 0,
2750 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) },
2751 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
2752 .access = PL3_RW, .writefn = vbar_write, .resetvalue = 0,
2753 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
2754 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
2755 .type = ARM_CP_ALIAS, /* reset handled by AArch32 view */
2756 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
2757 .access = PL3_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
2758 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]) },
2759 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
2760 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
2761 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2762 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
2763 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
2764 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
2765 .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
2766 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2767 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
2768 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
2769 .type = ARM_CP_ALIAS,
2770 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
2771 .access = PL3_RW,
2772 .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
2773 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
2774 .type = ARM_CP_ALIAS,
2775 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
2776 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
2777 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
2778 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
2779 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
2780 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
2781 .type = ARM_CP_ALIAS,
2782 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
2783 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
2784 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
2785 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
2786 .access = PL3_RW, .writefn = vbar_write,
2787 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
2788 .resetvalue = 0 },
2789 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
2790 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
2791 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
2792 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
2793 REGINFO_SENTINEL
2794 };
2795
2796 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo