target-arm: Break down TLB_LOCKDOWN
[qemu.git] / target-arm / helper.c
1 #include "cpu.h"
2 #include "internals.h"
3 #include "exec/gdbstub.h"
4 #include "exec/helper-proto.h"
5 #include "qemu/host-utils.h"
6 #include "sysemu/arch_init.h"
7 #include "sysemu/sysemu.h"
8 #include "qemu/bitops.h"
9 #include "qemu/crc32c.h"
10 #include "exec/cpu_ldst.h"
11 #include "arm_ldst.h"
12 #include <zlib.h> /* For crc32 */
13
14 #ifndef CONFIG_USER_ONLY
15 static inline int get_phys_addr(CPUARMState *env, target_ulong address,
16 int access_type, ARMMMUIdx mmu_idx,
17 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
18 target_ulong *page_size);
19
20 /* Definitions for the PMCCNTR and PMCR registers */
21 #define PMCRD 0x8
22 #define PMCRC 0x4
23 #define PMCRE 0x1
24 #endif
25
26 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
27 {
28 int nregs;
29
30 /* VFP data registers are always little-endian. */
31 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
32 if (reg < nregs) {
33 stfq_le_p(buf, env->vfp.regs[reg]);
34 return 8;
35 }
36 if (arm_feature(env, ARM_FEATURE_NEON)) {
37 /* Aliases for Q regs. */
38 nregs += 16;
39 if (reg < nregs) {
40 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
41 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
42 return 16;
43 }
44 }
45 switch (reg - nregs) {
46 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
47 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
48 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
49 }
50 return 0;
51 }
52
/* Write one VFP/Neon register from the gdbstub.
 * Returns the number of bytes consumed from buf, or 0 if reg is out of
 * range for this CPU's register set.
 */
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* D0..D15 (plain VFP) or D0..D31 (VFP3); values are little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for the Q registers: each covers a pair of D regs. */
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    /* Only bit [30] of FPEXC is writable here. */
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
77
78 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
79 {
80 switch (reg) {
81 case 0 ... 31:
82 /* 128 bit FP register */
83 stfq_le_p(buf, env->vfp.regs[reg * 2]);
84 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
85 return 16;
86 case 32:
87 /* FPSR */
88 stl_p(buf, vfp_get_fpsr(env));
89 return 4;
90 case 33:
91 /* FPCR */
92 stl_p(buf, vfp_get_fpcr(env));
93 return 4;
94 default:
95 return 0;
96 }
97 }
98
/* Write one AArch64 FP/SIMD register from the gdbstub.
 * Returns the number of bytes consumed from buf, or 0 for unknown regs.
 */
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register, stored as two 64-bit halves */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
119
120 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
121 {
122 assert(ri->fieldoffset);
123 if (cpreg_field_is_64bit(ri)) {
124 return CPREG_FIELD64(env, ri);
125 } else {
126 return CPREG_FIELD32(env, ri);
127 }
128 }
129
130 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
131 uint64_t value)
132 {
133 assert(ri->fieldoffset);
134 if (cpreg_field_is_64bit(ri)) {
135 CPREG_FIELD64(env, ri) = value;
136 } else {
137 CPREG_FIELD32(env, ri) = value;
138 }
139 }
140
141 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
142 {
143 return (char *)env + ri->fieldoffset;
144 }
145
146 static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
147 {
148 /* Raw read of a coprocessor register (as needed for migration, etc). */
149 if (ri->type & ARM_CP_CONST) {
150 return ri->resetvalue;
151 } else if (ri->raw_readfn) {
152 return ri->raw_readfn(env, ri);
153 } else if (ri->readfn) {
154 return ri->readfn(env, ri);
155 } else {
156 return raw_read(env, ri);
157 }
158 }
159
160 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
161 uint64_t v)
162 {
163 /* Raw write of a coprocessor register (as needed for migration, etc).
164 * Note that constant registers are treated as write-ignored; the
165 * caller should check for success by whether a readback gives the
166 * value written.
167 */
168 if (ri->type & ARM_CP_CONST) {
169 return;
170 } else if (ri->raw_writefn) {
171 ri->raw_writefn(env, ri, v);
172 } else if (ri->writefn) {
173 ri->writefn(env, ri, v);
174 } else {
175 raw_write(env, ri, v);
176 }
177 }
178
/* Sanity-check predicate used when registering cpregs without ARM_CP_NO_RAW. */
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
199
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list.
     * Returns false if any index in the list did not resolve to a known
     * register definition; NO_RAW registers are silently skipped.
     */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
222
/* Copy the (index,value) list back into CPU state (inverse of
 * write_cpustate_to_list). Returns false if any register could not be
 * restored exactly: unknown regidx, or the value did not read back as
 * written.
 */
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
252
253 static void add_cpreg_to_list(gpointer key, gpointer opaque)
254 {
255 ARMCPU *cpu = opaque;
256 uint64_t regidx;
257 const ARMCPRegInfo *ri;
258
259 regidx = *(uint32_t *)key;
260 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
261
262 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
263 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
264 /* The value array need not be initialized at this point */
265 cpu->cpreg_array_len++;
266 }
267 }
268
269 static void count_cpreg(gpointer key, gpointer opaque)
270 {
271 ARMCPU *cpu = opaque;
272 uint64_t regidx;
273 const ARMCPRegInfo *ri;
274
275 regidx = *(uint32_t *)key;
276 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
277
278 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
279 cpu->cpreg_array_len++;
280 }
281 }
282
283 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
284 {
285 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
286 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
287
288 if (aidx > bidx) {
289 return 1;
290 }
291 if (aidx < bidx) {
292 return -1;
293 }
294 return 0;
295 }
296
/* GHashTable foreach callback: prepend each key onto the list at *udata. */
static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}
303
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     * Two passes: first count the migratable registers to size the
     * arrays, then fill in the sorted index list.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    /* Pass 1: count. */
    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    /* Pass 2: populate; add_cpreg_to_list re-increments cpreg_array_len. */
    g_list_foreach(keys, add_cpreg_to_list, cpu);

    /* Both passes must agree on which registers are migratable. */
    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
334
335 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
336 {
337 ARMCPU *cpu = arm_env_get_cpu(env);
338
339 raw_write(env, ri, value);
340 tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
341 }
342
343 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
344 {
345 ARMCPU *cpu = arm_env_get_cpu(env);
346
347 if (raw_read(env, ri) != value) {
348 /* Unlike real hardware the qemu TLB uses virtual addresses,
349 * not modified virtual addresses, so this causes a TLB flush.
350 */
351 tlb_flush(CPU(cpu), 1);
352 raw_write(env, ri, value);
353 }
354 }
355
/* CONTEXTIDR write: flush the TLB only when the value actually changes
 * and the register carries an ASID (i.e. VMSA short-descriptor format).
 */
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}
371
372 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
373 uint64_t value)
374 {
375 /* Invalidate all (TLBIALL) */
376 ARMCPU *cpu = arm_env_get_cpu(env);
377
378 tlb_flush(CPU(cpu), 1);
379 }
380
381 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
382 uint64_t value)
383 {
384 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
385 ARMCPU *cpu = arm_env_get_cpu(env);
386
387 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
388 }
389
390 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
391 uint64_t value)
392 {
393 /* Invalidate by ASID (TLBIASID) */
394 ARMCPU *cpu = arm_env_get_cpu(env);
395
396 tlb_flush(CPU(cpu), value == 0);
397 }
398
399 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
400 uint64_t value)
401 {
402 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
403 ARMCPU *cpu = arm_env_get_cpu(env);
404
405 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
406 }
407
408 /* IS variants of TLB operations must affect all cores */
409 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
410 uint64_t value)
411 {
412 CPUState *other_cs;
413
414 CPU_FOREACH(other_cs) {
415 tlb_flush(other_cs, 1);
416 }
417 }
418
419 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
420 uint64_t value)
421 {
422 CPUState *other_cs;
423
424 CPU_FOREACH(other_cs) {
425 tlb_flush(other_cs, value == 0);
426 }
427 }
428
429 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
430 uint64_t value)
431 {
432 CPUState *other_cs;
433
434 CPU_FOREACH(other_cs) {
435 tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
436 }
437 }
438
439 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
440 uint64_t value)
441 {
442 CPUState *other_cs;
443
444 CPU_FOREACH(other_cs) {
445 tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
446 }
447 }
448
/* cp15 registers present on essentially every ARM core we model. */
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
483
/* Registers for pre-v8 cores only. */
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     * Each crm value used by ARMv7 gets its own NOP entry here.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
512
/* Registers for pre-v6 cores only. */
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
521
/* Registers for pre-v7 cores only. */
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory-region remap registers: NOPed here, overridden where needed. */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
570
/* CPACR write: mask off bits for unimplemented coprocessors (pre-v8)
 * and force the RAO/WI bits where the relevant feature is absent.
 */
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        /* Without VFP, mask is 0 and the whole write is ignored. */
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
604
/* Access check for CPACR: on v8, accesses may trap to EL2 (via
 * CPTR_EL2.TCPAC, non-secure EL1 only) or to EL3 (via CPTR_EL3.TCPAC).
 */
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}
621
622 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri)
623 {
624 /* Check if CPTR accesses are set to trap to EL3 */
625 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
626 return CP_ACCESS_TRAP_EL3;
627 }
628
629 return CP_ACCESS_OK;
630 }
631
/* Registers introduced in ARMv6. */
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* The cp15 barrier operations are NOPs in TCG emulation. */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
659
660 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
661 {
662 /* Performance monitor registers user accessibility is controlled
663 * by PMUSERENR.
664 */
665 if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) {
666 return CP_ACCESS_TRAP;
667 }
668 return CP_ACCESS_OK;
669 }
670
671 #ifndef CONFIG_USER_ONLY
672
673 static inline bool arm_ccnt_enabled(CPUARMState *env)
674 {
675 /* This does not support checking PMCCFILTR_EL0 register */
676
677 if (!(env->cp15.c9_pmcr & PMCRE)) {
678 return false;
679 }
680
681 return true;
682 }
683
/* Swap c15_ccnt between "counter value" and "delta from current ticks"
 * representations. Called in pairs around operations that change the
 * counter configuration; the subtraction is its own inverse.
 */
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                          get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
700
/* PMCR write: update the writable control bits, optionally resetting
 * the cycle counter, with the count kept consistent via pmccntr_sync().
 */
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}
717
/* Read the cycle counter. While enabled, c15_ccnt holds the delta from
 * the current tick count; while disabled it holds the counter value
 * directly.
 */
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                          get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
736
/* Write the cycle counter. While enabled, store the delta between the
 * current tick count and the requested value so later reads return a
 * count advancing from 'value'.
 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
                          get_ticks_per_sec(), 1000000);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
757
758 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
759 uint64_t value)
760 {
761 uint64_t cur_val = pmccntr_read(env, NULL);
762
763 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
764 }
765
#else /* CONFIG_USER_ONLY */

/* User-mode emulation has no cycle-counter clock state to sync. */
void pmccntr_sync(CPUARMState *env)
{
}

#endif
773
774 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
775 uint64_t value)
776 {
777 pmccntr_sync(env);
778 env->cp15.pmccfiltr_el0 = value & 0x7E000000;
779 pmccntr_sync(env);
780 }
781
782 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
783 uint64_t value)
784 {
785 value &= (1 << 31);
786 env->cp15.c9_pmcnten |= value;
787 }
788
789 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
790 uint64_t value)
791 {
792 value &= (1 << 31);
793 env->cp15.c9_pmcnten &= ~value;
794 }
795
/* PMOVSR write: write-one-to-clear semantics for the overflow flags. */
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}
801
/* PMXEVTYPER write: only bits [7:0] are kept. */
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}
807
/* PMUSERENR write: only bit 0 is kept (nonzero grants EL0 access to
 * the performance monitor registers; see pmreg_access()).
 */
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}
813
814 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
815 uint64_t value)
816 {
817 /* We have no event counters so only the C bit can be changed */
818 value &= (1 << 31);
819 env->cp15.c9_pminten |= value;
820 }
821
822 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
823 uint64_t value)
824 {
825 value &= (1 << 31);
826 env->cp15.c9_pminten &= ~value;
827 }
828
/* VBAR write: mask the low five bits to zero. */
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
840
/* SCR/SCR_EL3 write: clear bits that are RES0 for this configuration. */
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
868
869 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
870 {
871 ARMCPU *cpu = arm_env_get_cpu(env);
872
873 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
874 * bank
875 */
876 uint32_t index = A32_BANKED_REG_GET(env, csselr,
877 ri->secure & ARM_CP_SECSTATE_S);
878
879 return cpu->ccsidr[index];
880 }
881
882 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
883 uint64_t value)
884 {
885 raw_write(env, ri, value & 0xf);
886 }
887
/* ISR read: report pending IRQ/FIQ lines as the CPSR I and F bits. */
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
902
903 static const ARMCPRegInfo v7_cp_reginfo[] = {
904 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
905 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
906 .access = PL1_W, .type = ARM_CP_NOP },
907 /* Performance monitors are implementation defined in v7,
908 * but with an ARM recommended set of registers, which we
909 * follow (although we don't actually implement any counters)
910 *
911 * Performance registers fall into three categories:
912 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
913 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
914 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
915 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
916 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
917 */
918 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
919 .access = PL0_RW, .type = ARM_CP_ALIAS,
920 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
921 .writefn = pmcntenset_write,
922 .accessfn = pmreg_access,
923 .raw_writefn = raw_write },
924 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
925 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
926 .access = PL0_RW, .accessfn = pmreg_access,
927 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
928 .writefn = pmcntenset_write, .raw_writefn = raw_write },
929 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
930 .access = PL0_RW,
931 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
932 .accessfn = pmreg_access,
933 .writefn = pmcntenclr_write,
934 .type = ARM_CP_ALIAS },
935 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
936 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
937 .access = PL0_RW, .accessfn = pmreg_access,
938 .type = ARM_CP_ALIAS,
939 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
940 .writefn = pmcntenclr_write },
941 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
942 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
943 .accessfn = pmreg_access,
944 .writefn = pmovsr_write,
945 .raw_writefn = raw_write },
946 /* Unimplemented so WI. */
947 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
948 .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
949 /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
950 * We choose to RAZ/WI.
951 */
952 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
953 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
954 .accessfn = pmreg_access },
955 #ifndef CONFIG_USER_ONLY
956 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
957 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
958 .readfn = pmccntr_read, .writefn = pmccntr_write32,
959 .accessfn = pmreg_access },
960 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
961 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
962 .access = PL0_RW, .accessfn = pmreg_access,
963 .type = ARM_CP_IO,
964 .readfn = pmccntr_read, .writefn = pmccntr_write, },
965 #endif
966 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
967 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
968 .writefn = pmccfiltr_write,
969 .access = PL0_RW, .accessfn = pmreg_access,
970 .type = ARM_CP_IO,
971 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
972 .resetvalue = 0, },
973 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
974 .access = PL0_RW,
975 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
976 .accessfn = pmreg_access, .writefn = pmxevtyper_write,
977 .raw_writefn = raw_write },
978 /* Unimplemented, RAZ/WI. */
979 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
980 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
981 .accessfn = pmreg_access },
982 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
983 .access = PL0_R | PL1_RW,
984 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
985 .resetvalue = 0,
986 .writefn = pmuserenr_write, .raw_writefn = raw_write },
987 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
988 .access = PL1_RW,
989 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
990 .resetvalue = 0,
991 .writefn = pmintenset_write, .raw_writefn = raw_write },
992 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
993 .access = PL1_RW, .type = ARM_CP_ALIAS,
994 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
995 .resetvalue = 0, .writefn = pmintenclr_write, },
996 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
997 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
998 .access = PL1_RW, .writefn = vbar_write,
999 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
1000 offsetof(CPUARMState, cp15.vbar_ns) },
1001 .resetvalue = 0 },
1002 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
1003 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
1004 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
1005 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
1006 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
1007 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
1008 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
1009 offsetof(CPUARMState, cp15.csselr_ns) } },
1010 /* Auxiliary ID register: this actually has an IMPDEF value but for now
1011 * just RAZ for all cores:
1012 */
1013 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
1014 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
1015 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1016 /* Auxiliary fault status registers: these also are IMPDEF, and we
1017 * choose to RAZ/WI for all cores.
1018 */
1019 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
1020 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
1021 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1022 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
1023 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
1024 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1025 /* MAIR can just read-as-written because we don't implement caches
1026 * and so don't need to care about memory attributes.
1027 */
1028 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
1029 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1030 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
1031 .resetvalue = 0 },
1032 /* For non-long-descriptor page tables these are PRRR and NMRR;
1033 * regardless they still act as reads-as-written for QEMU.
1034 */
1035 /* MAIR0/1 are defined separately from their 64-bit counterpart which
1036 * allows them to assign the correct fieldoffset based on the endianness
1037 * handled in the field definitions.
1038 */
1039 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1040 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
1041 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1042 offsetof(CPUARMState, cp15.mair0_ns) },
1043 .resetfn = arm_cp_reset_ignore },
1044 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1045 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
1046 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1047 offsetof(CPUARMState, cp15.mair1_ns) },
1048 .resetfn = arm_cp_reset_ignore },
1049 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1050 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1051 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1052 /* 32 bit ITLB invalidates */
1053 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
1054 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1055 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
1056 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1057 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
1058 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1059 /* 32 bit DTLB invalidates */
1060 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
1061 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1062 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
1063 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1064 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
1065 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1066 /* 32 bit TLB invalidates */
1067 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1068 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1069 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1070 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1071 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1072 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1073 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1074 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
1075 REGINFO_SENTINEL
1076 };
1077
/* Registers present only on v7 cores with the Multiprocessing Extensions:
 * the Inner Shareable variants of the 32-bit TLB invalidate operations.
 */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1092
1093 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1094 uint64_t value)
1095 {
1096 value &= 1;
1097 env->teecr = value;
1098 }
1099
1100 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
1101 {
1102 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1103 return CP_ACCESS_TRAP;
1104 }
1105 return CP_ACCESS_OK;
1106 }
1107
/* ThumbEE registers on cp14. Bit 0 of TEECR (see teecr_write) gates
 * PL0 access to TEEHBR, enforced via teehbr_access.
 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1118
/* V6K software thread-ID registers, in AArch64 and AArch32 forms.
 * The 32-bit views are banked between Secure and Non-secure state
 * (bank_fieldoffsets) and use arm_cp_reset_ignore so that reset is
 * handled once; presumably the 64-bit entries own the reset — confirm
 * against the cpreg reset machinery.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1150
1151 #ifndef CONFIG_USER_ONLY
1152
1153 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
1154 {
1155 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
1156 if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
1157 return CP_ACCESS_TRAP;
1158 }
1159 return CP_ACCESS_OK;
1160 }
1161
1162 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
1163 {
1164 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1165 if (arm_current_el(env) == 0 &&
1166 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1167 return CP_ACCESS_TRAP;
1168 }
1169 return CP_ACCESS_OK;
1170 }
1171
1172 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
1173 {
1174 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1175 * EL0[PV]TEN is zero.
1176 */
1177 if (arm_current_el(env) == 0 &&
1178 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1179 return CP_ACCESS_TRAP;
1180 }
1181 return CP_ACCESS_OK;
1182 }
1183
1184 static CPAccessResult gt_pct_access(CPUARMState *env,
1185 const ARMCPRegInfo *ri)
1186 {
1187 return gt_counter_access(env, GTIMER_PHYS);
1188 }
1189
1190 static CPAccessResult gt_vct_access(CPUARMState *env,
1191 const ARMCPRegInfo *ri)
1192 {
1193 return gt_counter_access(env, GTIMER_VIRT);
1194 }
1195
1196 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
1197 {
1198 return gt_timer_access(env, GTIMER_PHYS);
1199 }
1200
1201 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
1202 {
1203 return gt_timer_access(env, GTIMER_VIRT);
1204 }
1205
1206 static uint64_t gt_get_countervalue(CPUARMState *env)
1207 {
1208 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1209 }
1210
1211 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1212 {
1213 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1214
1215 if (gt->ctl & 1) {
1216 /* Timer enabled: calculate and set current ISTATUS, irq, and
1217 * reset timer to when ISTATUS next has to change
1218 */
1219 uint64_t count = gt_get_countervalue(&cpu->env);
1220 /* Note that this must be unsigned 64 bit arithmetic: */
1221 int istatus = count >= gt->cval;
1222 uint64_t nexttick;
1223
1224 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1225 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1226 (istatus && !(gt->ctl & 2)));
1227 if (istatus) {
1228 /* Next transition is when count rolls back over to zero */
1229 nexttick = UINT64_MAX;
1230 } else {
1231 /* Next transition is when we hit cval */
1232 nexttick = gt->cval;
1233 }
1234 /* Note that the desired next expiry time might be beyond the
1235 * signed-64-bit range of a QEMUTimer -- in this case we just
1236 * set the timer for as far in the future as possible. When the
1237 * timer expires we will reset the timer for any remaining period.
1238 */
1239 if (nexttick > INT64_MAX / GTIMER_SCALE) {
1240 nexttick = INT64_MAX / GTIMER_SCALE;
1241 }
1242 timer_mod(cpu->gt_timer[timeridx], nexttick);
1243 } else {
1244 /* Timer disabled: ISTATUS and timer output always clear */
1245 gt->ctl &= ~4;
1246 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1247 timer_del(cpu->gt_timer[timeridx]);
1248 }
1249 }
1250
1251 static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1252 {
1253 ARMCPU *cpu = arm_env_get_cpu(env);
1254 int timeridx = ri->opc1 & 1;
1255
1256 timer_del(cpu->gt_timer[timeridx]);
1257 }
1258
1259 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1260 {
1261 return gt_get_countervalue(env);
1262 }
1263
1264 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1265 uint64_t value)
1266 {
1267 int timeridx = ri->opc1 & 1;
1268
1269 env->cp15.c14_timer[timeridx].cval = value;
1270 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1271 }
1272
1273 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1274 {
1275 int timeridx = ri->crm & 1;
1276
1277 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1278 gt_get_countervalue(env));
1279 }
1280
1281 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1282 uint64_t value)
1283 {
1284 int timeridx = ri->crm & 1;
1285
1286 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
1287 sextract64(value, 0, 32);
1288 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1289 }
1290
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Write CNT[PV]_CTL: only ENABLE (bit 0) and IMASK (bit 1) are
     * writable; ISTATUS (bit 2) is preserved from the old value.
     * A toggle of ENABLE needs a full recalc (which also updates the
     * IRQ line); a toggle of IMASK alone only moves the IRQ line.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
}
1310
1311 void arm_gt_ptimer_cb(void *opaque)
1312 {
1313 ARMCPU *cpu = opaque;
1314
1315 gt_recalc_timer(cpu, GTIMER_PHYS);
1316 }
1317
1318 void arm_gt_vtimer_cb(void *opaque)
1319 {
1320 ARMCPU *cpu = opaque;
1321
1322 gt_recalc_timer(cpu, GTIMER_VIRT);
1323 }
1324
/* ARM generic timer registers: counter frequency and access control,
 * per-timer control and compare registers, the 32-bit TimerValue views
 * and the counters themselves, in both AArch32 (cp15) and AArch64
 * encodings. The 32-bit CTL/CVAL entries are ARM_CP_ALIAS views of the
 * state owned by the corresponding 64-bit entries (hence their ignored
 * resets).
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
1461
1462 #else
1463 /* In user-mode none of the generic timer registers are accessible,
1464 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
1465 * so instead just don't register any of them.
1466 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Empty: no generic timer registers in user-only mode (see above). */
    REGINFO_SENTINEL
};
1470
1471 #endif
1472
1473 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1474 {
1475 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1476 raw_write(env, ri, value);
1477 } else if (arm_feature(env, ARM_FEATURE_V7)) {
1478 raw_write(env, ri, value & 0xfffff6ff);
1479 } else {
1480 raw_write(env, ri, value & 0xfffff1ff);
1481 }
1482 }
1483
1484 #ifndef CONFIG_USER_ONLY
1485 /* get_phys_addr() isn't present for user-mode-only targets */
1486
1487 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
1488 {
1489 if (ri->opc2 & 4) {
1490 /* Other states are only available with TrustZone; in
1491 * a non-TZ implementation these registers don't exist
1492 * at all, which is an Uncategorized trap. This underdecoding
1493 * is safe because the reginfo is NO_RAW.
1494 */
1495 return CP_ACCESS_TRAP_UNCATEGORIZED;
1496 }
1497 return CP_ACCESS_OK;
1498 }
1499
/* Perform an address translation operation for virtual address @value in
 * translation regime @mmu_idx, and return the resulting PAR value:
 * 64-bit (LPAE) format when extended addresses are enabled, 32-bit
 * short-descriptor format otherwise. @access_type is opc2 bit 0 of the
 * AT operation (0 for the read form, 1 for the write form).
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             int access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;                /* 0 on success, else a DFSR/IFSR fault value */
    uint64_t par64;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx,
                        &phys_addr, &attrs, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersections report only PA[31:24] plus the SS bit */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            /* Fault: pack the FSR status bits into the 32-bit PAR layout */
            par64 = ((ret & (1 << 10)) >> 5) | ((ret & (1 << 12)) >> 6) |
                    ((ret & 0xf) << 1) | 1;
        }
    }
    return par64;
}
1555
/* AArch32 address translation (ATS*) operations: decode the requested
 * translation regime from opc2 bits [2:1] plus the current EL and
 * security state, perform the translation and write the result into the
 * banked PAR register for the current security state.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    int access_type = ri->opc2 & 1;     /* 0: read form, 1: write form */
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
1613
/* AArch64 address translation (AT S1Ex / AT S12Ex) operations: select
 * the translation regime from the opcode fields and store the 64-bit
 * result into PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    int access_type = ri->opc2 & 1;     /* 0: read form, 1: write form */
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* opc2[2:1] == 0: stage 1 EL1/EL2/EL3, distinguished by opc1 */
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
1652 #endif
1653
/* VA-to-PA translation support: the PAR result register (banked between
 * Secure and Non-secure state) and, for system emulation only, the cp15
 * ATS* address translation operations themselves.
 */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
1667
/* Return basic MPU access permission bits.
 *
 * Compress the extended access-permission layout (the 2-bit field for
 * region k held in bits [4k+1:4k]) down to the legacy packed layout
 * (region k in bits [2k+1:2k]).
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t packed = 0;
    int region;

    for (region = 0; region < 8; region++) {
        packed |= (val >> (2 * region)) & (3u << (2 * region));
    }
    return packed;
}
1682
/* Pad basic MPU access permission bits to extended format.
 *
 * Spread the legacy packed layout (region k in bits [2k+1:2k]) into the
 * extended layout, where region k occupies bits [4k+1:4k].
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t padded = 0;
    int region;

    for (region = 0; region < 8; region++) {
        padded |= (val & (3u << (2 * region))) << (2 * region);
    }
    return padded;
}
1697
1698 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1699 uint64_t value)
1700 {
1701 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
1702 }
1703
1704 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1705 {
1706 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
1707 }
1708
1709 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1710 uint64_t value)
1711 {
1712 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
1713 }
1714
1715 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1716 {
1717 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
1718 }
1719
/* PMSAv5 MPU registers: access-permission registers (the legacy packed
 * and extended layouts alias the same state, converted by the read/write
 * fns above), cache configuration, and the eight protection-region
 * base/size registers.
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
1772
/* Raw write of TTBCR/TCR: sanitize bits that are UNK/SBZP on pre-v8
 * cores, then update the cached short-descriptor table-walk masks for
 * the bank being written.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3); /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
1805
/* Guest-initiated TTBCR write: flush the TLB first when LPAE is present
 * (the A1 bit can switch which TTBR supplies the ASID), then delegate to
 * the raw write handler.
 */
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu), 1);
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}
1819
1820 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1821 {
1822 TCR *tcr = raw_ptr(env, ri);
1823
1824 /* Reset both the TCR as well as the masks corresponding to the bank of
1825 * the TCR being reset.
1826 */
1827 tcr->raw_tcr = 0;
1828 tcr->mask = 0;
1829 tcr->base_mask = 0xffffc000u;
1830 }
1831
/* TCR_EL1 write (AArch64): always flush the TLB before storing, then
 * record the new raw value.
 */
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu), 1);
    tcr->raw_tcr = value;
}
1842
/* TTBR0/TTBR1 write handler, shared by the 32-bit and 64-bit views.
 * Only the 64-bit view carries an ASID field, so only that case needs
 * a TLB flush.
 */
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}
1856
/* Registers for CPUs with an MMU (VMSA): fault status/address registers
 * and the translation table base/control registers, with secure and
 * non-secure banking where applicable.
 */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) },
      .resetfn = arm_cp_reset_ignore, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /* 32-bit TTBCR is the low half of TCR_EL1/TCR_EL3 depending on
     * security state; reset is handled by the TCR_EL1 entry above.
     */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    REGINFO_SENTINEL
};
1901
/* OMAP TICONFIG write: only bits in 0xe7 are implemented, and the
 * OS_TYPE bit (bit 5) switches the CPUID between TI915T and TI925T.
 */
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}
1910
1911 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1912 uint64_t value)
1913 {
1914 env->cp15.c15_threadid = value & 0xffff;
1915 }
1916
/* OMAP wait-for-interrupt via cp15 write: halt the CPU by raising the
 * HALT interrupt. The written value is ignored.
 */
static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}
1923
/* OMAP cache maintenance: we emulate no cache, so the only visible
 * effect is resetting the dirty-line min/max index registers.
 */
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
1933
/* Implementation-defined cp15 registers for TI OMAP/TI925T cores. */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Reads as zero; a write acts as a (deprecated) wait-for-interrupt. */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1973
1974 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1975 uint64_t value)
1976 {
1977 env->cp15.c15_cpar = value & 0x3fff;
1978 }
1979
/* Implementation-defined cp15 registers for Intel XScale cores. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2006
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2020
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2028
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2049
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2062
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2071
/* Read MPIDR: Aff0 is the CPU index; the MP-extensions "M" bit (31)
 * is set for V7MP-capable cores. All higher affinity fields RAZ.
 */
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11]) (known as Aff1
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    return mpidr;
}
2090
/* MPIDR / MPIDR_EL1: read-only, value computed per-CPU by mpidr_read(). */
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
2097
/* Registers added by the LPAE extension: AMAIR (NOPed), 64-bit PAR and
 * the 64-bit views of TTBR0/TTBR1.
 */
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
2124
/* FPCR read: delegate to the shared VFP control-register helper. */
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}
2129
/* FPCR write: delegate to the shared VFP control-register helper. */
static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}
2135
/* FPSR read: delegate to the shared VFP status-register helper. */
static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}
2140
/* FPSR write: delegate to the shared VFP status-register helper. */
static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
2146
/* Access check for DAIF: EL0 access traps unless SCTLR_EL1.UMA is set. */
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
2154
/* DAIF write: store only the D/A/I/F mask bits of the value. */
static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
2160
/* Access check for the EL0-accessible cache maintenance operations. */
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
2172
2173 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2174 * Page D4-1736 (DDI0487A.b)
2175 */
2176
static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Invalidate by VA (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    /* Operand bits [43:0] are VA[55:12]; recover a sign-extended address. */
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page(CPU(cpu), pageaddr);
}
2186
static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    /* Invalidate by VA, all ASIDs (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    /* Operand bits [43:0] are VA[55:12]; recover a sign-extended address. */
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page(CPU(cpu), pageaddr);
}
2196
static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by ASID (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    /* ASID is in operand bits [63:48]; QEMU's TLB has no per-ASID entries,
     * so flush everything (including global entries only when ASID == 0).
     */
    int asid = extract64(value, 48, 16);
    tlb_flush(CPU(cpu), asid == 0);
}
2205
/* Inner-shareable invalidate by VA: apply the flush to every CPU. */
static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *other_cs;
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, pageaddr);
    }
}
2216
/* Inner-shareable invalidate by VA, all ASIDs: flush on every CPU. */
static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *other_cs;
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, pageaddr);
    }
}
2227
/* Inner-shareable invalidate by ASID: flush on every CPU. */
static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *other_cs;
    int asid = extract64(value, 48, 16);

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs, asid == 0);
    }
}
2238
/* Access check for DC ZVA: EL0 access traps unless SCTLR_EL1.DZE is set. */
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
2249
/* Read DCZID_EL0: block size in [3:0], DZP (bit 4) set when DC ZVA
 * is currently prohibited for the calling exception level.
 */
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
2261
/* Access check for SP_EL0: UNDEF while SPSel selects SP_EL0 as SP. */
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
2272
/* Read SPSel: the SP-selection bit of PSTATE. */
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}
2277
/* Write SPSel: delegate to update_spsel, which swaps the active SP. */
static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
2282
/* SCTLR write: store the new value and flush the TLB, skipping the
 * (expensive) flush entirely when the value is unchanged.
 */
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu), 1);
}
2300
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbiall_is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_va_is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_asid_is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vaa_is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_va_is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vaa_is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vaa_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[1]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    REGINFO_SENTINEL
};
2505
2506 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
/* Used to describe the behaviour of EL2 regs when EL2 does not exist:
 * accessible from EL3 but effectively RAZ/WI.
 */
static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2522
/* HCR_EL2 write: mask out bits that are RES0 for this configuration,
 * flushing the TLB when translation-affecting bits change.
 */
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* HCD is RES0 when EL3 is implemented */
        valid_mask &= ~HCR_HCD;
    } else {
        /* TSC is RES0 without EL3 (no SMC to trap) */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}
2547
/* EL2 (hypervisor) registers, registered only when EL2 is implemented. */
static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    REGINFO_SENTINEL
};
2593
/* EL3 (secure monitor) system registers. */
static const ARMCPRegInfo el3_cp_reginfo[] = {
    /* Secure Configuration Register; scr_write handles the valid-bit
     * masking for both the AArch64 view and the 32-bit alias below.
     */
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    /* AArch32 SCR: alias onto the low 32 bits of scr_el3; reset is
     * handled by the AArch64 view, so it is ignored here.
     */
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .resetfn = arm_cp_reset_ignore, .writefn = scr_write },
    /* Secure Debug Enable Register, plus its AArch32 view below. */
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    /* TODO: Implement NSACR trapping of secure EL1 accesses to EL3 */
    /* Non-Secure Access Control: writable only from EL3, readable from
     * EL1 and above (PL3_W | PL1_R).
     */
    { .name = "NSACR", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_W | PL1_R, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.nsacr) },
    /* Monitor Vector Base Address Register (AArch32 only). */
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL3_RW, .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    /* SPSR_EL3 aliases banked_spsr[7] (the bank slot used for EL3). */
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    /* Architectural feature trap register; access gated by cptr_access. */
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    REGINFO_SENTINEL
};
2658
2659 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
2660 {
2661 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
2662 * but the AArch32 CTR has its own reginfo struct)
2663 */
2664 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
2665 return CP_ACCESS_TRAP;
2666 }
2667 return CP_ACCESS_OK;
2668 }
2669
/* Architectural (v7/v8) debug registers; mostly dummy or RAZ
 * implementations, registered from define_debug_regs().
 */
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetfn = arm_cp_reset_ignore },
    /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2715
/* LPAE-only 64-bit views of the dummy debug registers; registered from
 * define_debug_regs() when the CPU has ARM_FEATURE_LPAE.
 */
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2724
2725 void hw_watchpoint_update(ARMCPU *cpu, int n)
2726 {
2727 CPUARMState *env = &cpu->env;
2728 vaddr len = 0;
2729 vaddr wvr = env->cp15.dbgwvr[n];
2730 uint64_t wcr = env->cp15.dbgwcr[n];
2731 int mask;
2732 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
2733
2734 if (env->cpu_watchpoint[n]) {
2735 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
2736 env->cpu_watchpoint[n] = NULL;
2737 }
2738
2739 if (!extract64(wcr, 0, 1)) {
2740 /* E bit clear : watchpoint disabled */
2741 return;
2742 }
2743
2744 switch (extract64(wcr, 3, 2)) {
2745 case 0:
2746 /* LSC 00 is reserved and must behave as if the wp is disabled */
2747 return;
2748 case 1:
2749 flags |= BP_MEM_READ;
2750 break;
2751 case 2:
2752 flags |= BP_MEM_WRITE;
2753 break;
2754 case 3:
2755 flags |= BP_MEM_ACCESS;
2756 break;
2757 }
2758
2759 /* Attempts to use both MASK and BAS fields simultaneously are
2760 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
2761 * thus generating a watchpoint for every byte in the masked region.
2762 */
2763 mask = extract64(wcr, 24, 4);
2764 if (mask == 1 || mask == 2) {
2765 /* Reserved values of MASK; we must act as if the mask value was
2766 * some non-reserved value, or as if the watchpoint were disabled.
2767 * We choose the latter.
2768 */
2769 return;
2770 } else if (mask) {
2771 /* Watchpoint covers an aligned area up to 2GB in size */
2772 len = 1ULL << mask;
2773 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
2774 * whether the watchpoint fires when the unmasked bits match; we opt
2775 * to generate the exceptions.
2776 */
2777 wvr &= ~(len - 1);
2778 } else {
2779 /* Watchpoint covers bytes defined by the byte address select bits */
2780 int bas = extract64(wcr, 5, 8);
2781 int basstart;
2782
2783 if (bas == 0) {
2784 /* This must act as if the watchpoint is disabled */
2785 return;
2786 }
2787
2788 if (extract64(wvr, 2, 1)) {
2789 /* Deprecated case of an only 4-aligned address. BAS[7:4] are
2790 * ignored, and BAS[3:0] define which bytes to watch.
2791 */
2792 bas &= 0xf;
2793 }
2794 /* The BAS bits are supposed to be programmed to indicate a contiguous
2795 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
2796 * we fire for each byte in the word/doubleword addressed by the WVR.
2797 * We choose to ignore any non-zero bits after the first range of 1s.
2798 */
2799 basstart = ctz32(bas);
2800 len = cto32(bas >> basstart);
2801 wvr += basstart;
2802 }
2803
2804 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
2805 &env->cpu_watchpoint[n]);
2806 }
2807
2808 void hw_watchpoint_update_all(ARMCPU *cpu)
2809 {
2810 int i;
2811 CPUARMState *env = &cpu->env;
2812
2813 /* Completely clear out existing QEMU watchpoints and our array, to
2814 * avoid possible stale entries following migration load.
2815 */
2816 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
2817 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
2818
2819 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
2820 hw_watchpoint_update(cpu, i);
2821 }
2822 }
2823
2824 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2825 uint64_t value)
2826 {
2827 ARMCPU *cpu = arm_env_get_cpu(env);
2828 int i = ri->crm;
2829
2830 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
2831 * register reads and behaves as if values written are sign extended.
2832 * Bits [1:0] are RES0.
2833 */
2834 value = sextract64(value, 0, 49) & ~3ULL;
2835
2836 raw_write(env, ri, value);
2837 hw_watchpoint_update(cpu, i);
2838 }
2839
2840 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2841 uint64_t value)
2842 {
2843 ARMCPU *cpu = arm_env_get_cpu(env);
2844 int i = ri->crm;
2845
2846 raw_write(env, ri, value);
2847 hw_watchpoint_update(cpu, i);
2848 }
2849
/* Sync QEMU's breakpoint for slot n with the architectural state held in
 * DBGBVR<n>/DBGBCR<n>: any existing QEMU breakpoint for the slot is
 * removed, and a new one inserted if the slot is enabled and of a
 * breakpoint type we implement.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    /* BT field: breakpoint type. */
    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
2925
2926 void hw_breakpoint_update_all(ARMCPU *cpu)
2927 {
2928 int i;
2929 CPUARMState *env = &cpu->env;
2930
2931 /* Completely clear out existing QEMU breakpoints and our array, to
2932 * avoid possible stale entries following migration load.
2933 */
2934 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
2935 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
2936
2937 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
2938 hw_breakpoint_update(cpu, i);
2939 }
2940 }
2941
2942 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2943 uint64_t value)
2944 {
2945 ARMCPU *cpu = arm_env_get_cpu(env);
2946 int i = ri->crm;
2947
2948 raw_write(env, ri, value);
2949 hw_breakpoint_update(cpu, i);
2950 }
2951
2952 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2953 uint64_t value)
2954 {
2955 ARMCPU *cpu = arm_env_get_cpu(env);
2956 int i = ri->crm;
2957
2958 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
2959 * copy of BAS[0].
2960 */
2961 value = deposit64(value, 6, 1, extract64(value, 5, 1));
2962 value = deposit64(value, 8, 1, extract64(value, 7, 1));
2963
2964 raw_write(env, ri, value);
2965 hw_breakpoint_update(cpu, i);
2966 }
2967
2968 static void define_debug_regs(ARMCPU *cpu)
2969 {
2970 /* Define v7 and v8 architectural debug registers.
2971 * These are just dummy implementations for now.
2972 */
2973 int i;
2974 int wrps, brps, ctx_cmps;
2975 ARMCPRegInfo dbgdidr = {
2976 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
2977 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
2978 };
2979
2980 /* Note that all these register fields hold "number of Xs minus 1". */
2981 brps = extract32(cpu->dbgdidr, 24, 4);
2982 wrps = extract32(cpu->dbgdidr, 28, 4);
2983 ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
2984
2985 assert(ctx_cmps <= brps);
2986
2987 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
2988 * of the debug registers such as number of breakpoints;
2989 * check that if they both exist then they agree.
2990 */
2991 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
2992 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
2993 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
2994 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
2995 }
2996
2997 define_one_arm_cp_reg(cpu, &dbgdidr);
2998 define_arm_cp_regs(cpu, debug_cp_reginfo);
2999
3000 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
3001 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
3002 }
3003
3004 for (i = 0; i < brps + 1; i++) {
3005 ARMCPRegInfo dbgregs[] = {
3006 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
3007 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
3008 .access = PL1_RW,
3009 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
3010 .writefn = dbgbvr_write, .raw_writefn = raw_write
3011 },
3012 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
3013 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
3014 .access = PL1_RW,
3015 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
3016 .writefn = dbgbcr_write, .raw_writefn = raw_write
3017 },
3018 REGINFO_SENTINEL
3019 };
3020 define_arm_cp_regs(cpu, dbgregs);
3021 }
3022
3023 for (i = 0; i < wrps + 1; i++) {
3024 ARMCPRegInfo dbgregs[] = {
3025 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
3026 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
3027 .access = PL1_RW,
3028 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
3029 .writefn = dbgwvr_write, .raw_writefn = raw_write
3030 },
3031 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
3032 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
3033 .access = PL1_RW,
3034 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
3035 .writefn = dbgwcr_write, .raw_writefn = raw_write
3036 },