virtio-scsi: suppress virtqueue kick during processing
[qemu.git] / target / arm / translate-a64.c
1 /*
2 * AArch64 translation
3 *
4 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "tcg-op.h"
24 #include "qemu/log.h"
25 #include "arm_ldst.h"
26 #include "translate.h"
27 #include "internals.h"
28 #include "qemu/host-utils.h"
29
30 #include "exec/semihost.h"
31 #include "exec/gen-icount.h"
32
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
35 #include "exec/log.h"
36
37 #include "trace-tcg.h"
38
39 static TCGv_i64 cpu_X[32];
40 static TCGv_i64 cpu_pc;
41
42 /* Load/store exclusive handling */
43 static TCGv_i64 cpu_exclusive_high;
44 static TCGv_i64 cpu_reg(DisasContext *s, int reg);
45
46 static const char *regnames[] = {
47 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
48 "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
49 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
50 "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
51 };
52
53 enum a64_shift_type {
54 A64_SHIFT_TYPE_LSL = 0,
55 A64_SHIFT_TYPE_LSR = 1,
56 A64_SHIFT_TYPE_ASR = 2,
57 A64_SHIFT_TYPE_ROR = 3
58 };
59
60 /* Table based decoder typedefs - used when the relevant bits for decode
61 * are too awkwardly scattered across the instruction (eg SIMD).
62 */
63 typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
64
65 typedef struct AArch64DecodeTable {
66 uint32_t pattern;
67 uint32_t mask;
68 AArch64DecodeFn *disas_fn;
69 } AArch64DecodeTable;
70
71 /* Function prototype for gen_ functions for calling Neon helpers */
72 typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
73 typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
74 typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
75 typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
76 typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
77 typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
78 typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
79 typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
80 typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
81 typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
82 typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
83 typedef void CryptoTwoOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32);
84 typedef void CryptoThreeOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
85
86 /* initialize TCG globals. */
87 void a64_translate_init(void)
88 {
89 int i;
90
91 cpu_pc = tcg_global_mem_new_i64(cpu_env,
92 offsetof(CPUARMState, pc),
93 "pc");
94 for (i = 0; i < 32; i++) {
95 cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
96 offsetof(CPUARMState, xregs[i]),
97 regnames[i]);
98 }
99
100 cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_high), "exclusive_high");
102 }
103
104 static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
105 {
106 /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
107 * if EL1, access as if EL0; otherwise access at current EL
108 */
109 switch (s->mmu_idx) {
110 case ARMMMUIdx_S12NSE1:
111 return ARMMMUIdx_S12NSE0;
112 case ARMMMUIdx_S1SE1:
113 return ARMMMUIdx_S1SE0;
114 case ARMMMUIdx_S2NS:
115 g_assert_not_reached();
116 default:
117 return s->mmu_idx;
118 }
119 }
120
121 void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
122 fprintf_function cpu_fprintf, int flags)
123 {
124 ARMCPU *cpu = ARM_CPU(cs);
125 CPUARMState *env = &cpu->env;
126 uint32_t psr = pstate_read(env);
127 int i;
128 int el = arm_current_el(env);
129 const char *ns_status;
130
131 cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
132 env->pc, env->xregs[31]);
133 for (i = 0; i < 31; i++) {
134 cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
135 if ((i % 4) == 3) {
136 cpu_fprintf(f, "\n");
137 } else {
138 cpu_fprintf(f, " ");
139 }
140 }
141
142 if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
143 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
144 } else {
145 ns_status = "";
146 }
147
148 cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
149 psr,
150 psr & PSTATE_N ? 'N' : '-',
151 psr & PSTATE_Z ? 'Z' : '-',
152 psr & PSTATE_C ? 'C' : '-',
153 psr & PSTATE_V ? 'V' : '-',
154 ns_status,
155 el,
156 psr & PSTATE_SP ? 'h' : 't');
157
158 if (flags & CPU_DUMP_FPU) {
159 int numvfpregs = 32;
160 for (i = 0; i < numvfpregs; i += 2) {
161 uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
162 uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
163 cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
164 i, vhi, vlo);
165 vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
166 vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
167 cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
168 i + 1, vhi, vlo);
169 }
170 cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
171 vfp_get_fpcr(env), vfp_get_fpsr(env));
172 }
173 }
174
175 void gen_a64_set_pc_im(uint64_t val)
176 {
177 tcg_gen_movi_i64(cpu_pc, val);
178 }
179
180 /* Load the PC from a generic TCG variable.
181 *
182 * If address tagging is enabled via the TCR TBI bits, then loading
183 * an address into the PC will clear out any tag in the it:
184 * + for EL2 and EL3 there is only one TBI bit, and if it is set
185 * then the address is zero-extended, clearing bits [63:56]
186 * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
187 * and TBI1 controls addressses with bit 55 == 1.
188 * If the appropriate TBI bit is set for the address then
189 * the address is sign-extended from bit 55 into bits [63:56]
190 *
191 * We can avoid doing this for relative-branches, because the
192 * PC + offset can never overflow into the tag bits (assuming
193 * that virtual addresses are less than 56 bits wide, as they
194 * are currently), but we must handle it for branch-to-register.
195 */
196 static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
197 {
198
199 if (s->current_el <= 1) {
200 /* Test if NEITHER or BOTH TBI values are set. If so, no need to
201 * examine bit 55 of address, can just generate code.
202 * If mixed, then test via generated code
203 */
204 if (s->tbi0 && s->tbi1) {
205 TCGv_i64 tmp_reg = tcg_temp_new_i64();
206 /* Both bits set, sign extension from bit 55 into [63:56] will
207 * cover both cases
208 */
209 tcg_gen_shli_i64(tmp_reg, src, 8);
210 tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
211 tcg_temp_free_i64(tmp_reg);
212 } else if (!s->tbi0 && !s->tbi1) {
213 /* Neither bit set, just load it as-is */
214 tcg_gen_mov_i64(cpu_pc, src);
215 } else {
216 TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
217 TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
218 TCGv_i64 tcg_zero = tcg_const_i64(0);
219
220 tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));
221
222 if (s->tbi0) {
223 /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
224 tcg_gen_andi_i64(tcg_tmpval, src,
225 0x00FFFFFFFFFFFFFFull);
226 tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
227 tcg_tmpval, src);
228 } else {
229 /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
230 tcg_gen_ori_i64(tcg_tmpval, src,
231 0xFF00000000000000ull);
232 tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
233 tcg_tmpval, src);
234 }
235 tcg_temp_free_i64(tcg_zero);
236 tcg_temp_free_i64(tcg_bit55);
237 tcg_temp_free_i64(tcg_tmpval);
238 }
239 } else { /* EL > 1 */
240 if (s->tbi0) {
241 /* Force tag byte to all zero */
242 tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
243 } else {
244 /* Load unmodified address */
245 tcg_gen_mov_i64(cpu_pc, src);
246 }
247 }
248 }
249
250 typedef struct DisasCompare64 {
251 TCGCond cond;
252 TCGv_i64 value;
253 } DisasCompare64;
254
255 static void a64_test_cc(DisasCompare64 *c64, int cc)
256 {
257 DisasCompare c32;
258
259 arm_test_cc(&c32, cc);
260
261 /* Sign-extend the 32-bit value so that the GE/LT comparisons work
262 * properly. The NE/EQ comparisons are also fine with this choice. */
263 c64->cond = c32.cond;
264 c64->value = tcg_temp_new_i64();
265 tcg_gen_ext_i32_i64(c64->value, c32.value);
266
267 arm_free_cc(&c32);
268 }
269
270 static void a64_free_cc(DisasCompare64 *c64)
271 {
272 tcg_temp_free_i64(c64->value);
273 }
274
275 static void gen_exception_internal(int excp)
276 {
277 TCGv_i32 tcg_excp = tcg_const_i32(excp);
278
279 assert(excp_is_internal(excp));
280 gen_helper_exception_internal(cpu_env, tcg_excp);
281 tcg_temp_free_i32(tcg_excp);
282 }
283
284 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
285 {
286 TCGv_i32 tcg_excp = tcg_const_i32(excp);
287 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
288 TCGv_i32 tcg_el = tcg_const_i32(target_el);
289
290 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
291 tcg_syn, tcg_el);
292 tcg_temp_free_i32(tcg_el);
293 tcg_temp_free_i32(tcg_syn);
294 tcg_temp_free_i32(tcg_excp);
295 }
296
297 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
298 {
299 gen_a64_set_pc_im(s->pc - offset);
300 gen_exception_internal(excp);
301 s->is_jmp = DISAS_EXC;
302 }
303
304 static void gen_exception_insn(DisasContext *s, int offset, int excp,
305 uint32_t syndrome, uint32_t target_el)
306 {
307 gen_a64_set_pc_im(s->pc - offset);
308 gen_exception(excp, syndrome, target_el);
309 s->is_jmp = DISAS_EXC;
310 }
311
312 static void gen_ss_advance(DisasContext *s)
313 {
314 /* If the singlestep state is Active-not-pending, advance to
315 * Active-pending.
316 */
317 if (s->ss_active) {
318 s->pstate_ss = 0;
319 gen_helper_clear_pstate_ss(cpu_env);
320 }
321 }
322
323 static void gen_step_complete_exception(DisasContext *s)
324 {
325 /* We just completed step of an insn. Move from Active-not-pending
326 * to Active-pending, and then also take the swstep exception.
327 * This corresponds to making the (IMPDEF) choice to prioritize
328 * swstep exceptions over asynchronous exceptions taken to an exception
329 * level where debug is disabled. This choice has the advantage that
330 * we do not need to maintain internal state corresponding to the
331 * ISV/EX syndrome bits between completion of the step and generation
332 * of the exception, and our syndrome information is always correct.
333 */
334 gen_ss_advance(s);
335 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
336 default_exception_el(s));
337 s->is_jmp = DISAS_EXC;
338 }
339
340 static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
341 {
342 /* No direct tb linking with singlestep (either QEMU's or the ARM
343 * debug architecture kind) or deterministic io
344 */
345 if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) {
346 return false;
347 }
348
349 #ifndef CONFIG_USER_ONLY
350 /* Only link tbs from inside the same guest page */
351 if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
352 return false;
353 }
354 #endif
355
356 return true;
357 }
358
359 static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
360 {
361 TranslationBlock *tb;
362
363 tb = s->tb;
364 if (use_goto_tb(s, n, dest)) {
365 tcg_gen_goto_tb(n);
366 gen_a64_set_pc_im(dest);
367 tcg_gen_exit_tb((intptr_t)tb + n);
368 s->is_jmp = DISAS_TB_JUMP;
369 } else {
370 gen_a64_set_pc_im(dest);
371 if (s->ss_active) {
372 gen_step_complete_exception(s);
373 } else if (s->singlestep_enabled) {
374 gen_exception_internal(EXCP_DEBUG);
375 } else {
376 tcg_gen_exit_tb(0);
377 s->is_jmp = DISAS_TB_JUMP;
378 }
379 }
380 }
381
382 static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
383 {
384 /* We don't need to save all of the syndrome so we mask and shift
385 * out uneeded bits to help the sleb128 encoder do a better job.
386 */
387 syn &= ARM_INSN_START_WORD2_MASK;
388 syn >>= ARM_INSN_START_WORD2_SHIFT;
389
390 /* We check and clear insn_start_idx to catch multiple updates. */
391 assert(s->insn_start_idx != 0);
392 tcg_set_insn_param(s->insn_start_idx, 2, syn);
393 s->insn_start_idx = 0;
394 }
395
396 static void unallocated_encoding(DisasContext *s)
397 {
398 /* Unallocated and reserved encodings are uncategorized */
399 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
400 default_exception_el(s));
401 }
402
403 #define unsupported_encoding(s, insn) \
404 do { \
405 qemu_log_mask(LOG_UNIMP, \
406 "%s:%d: unsupported instruction encoding 0x%08x " \
407 "at pc=%016" PRIx64 "\n", \
408 __FILE__, __LINE__, insn, s->pc - 4); \
409 unallocated_encoding(s); \
410 } while (0);
411
412 static void init_tmp_a64_array(DisasContext *s)
413 {
414 #ifdef CONFIG_DEBUG_TCG
415 int i;
416 for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
417 TCGV_UNUSED_I64(s->tmp_a64[i]);
418 }
419 #endif
420 s->tmp_a64_count = 0;
421 }
422
423 static void free_tmp_a64(DisasContext *s)
424 {
425 int i;
426 for (i = 0; i < s->tmp_a64_count; i++) {
427 tcg_temp_free_i64(s->tmp_a64[i]);
428 }
429 init_tmp_a64_array(s);
430 }
431
432 static TCGv_i64 new_tmp_a64(DisasContext *s)
433 {
434 assert(s->tmp_a64_count < TMP_A64_MAX);
435 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
436 }
437
438 static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
439 {
440 TCGv_i64 t = new_tmp_a64(s);
441 tcg_gen_movi_i64(t, 0);
442 return t;
443 }
444
445 /*
446 * Register access functions
447 *
448 * These functions are used for directly accessing a register in where
449 * changes to the final register value are likely to be made. If you
450 * need to use a register for temporary calculation (e.g. index type
451 * operations) use the read_* form.
452 *
453 * B1.2.1 Register mappings
454 *
455 * In instruction register encoding 31 can refer to ZR (zero register) or
456 * the SP (stack pointer) depending on context. In QEMU's case we map SP
457 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
458 * This is the point of the _sp forms.
459 */
460 static TCGv_i64 cpu_reg(DisasContext *s, int reg)
461 {
462 if (reg == 31) {
463 return new_tmp_a64_zero(s);
464 } else {
465 return cpu_X[reg];
466 }
467 }
468
469 /* register access for when 31 == SP */
470 static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
471 {
472 return cpu_X[reg];
473 }
474
475 /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
476 * representing the register contents. This TCGv is an auto-freed
477 * temporary so it need not be explicitly freed, and may be modified.
478 */
479 static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
480 {
481 TCGv_i64 v = new_tmp_a64(s);
482 if (reg != 31) {
483 if (sf) {
484 tcg_gen_mov_i64(v, cpu_X[reg]);
485 } else {
486 tcg_gen_ext32u_i64(v, cpu_X[reg]);
487 }
488 } else {
489 tcg_gen_movi_i64(v, 0);
490 }
491 return v;
492 }
493
494 static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
495 {
496 TCGv_i64 v = new_tmp_a64(s);
497 if (sf) {
498 tcg_gen_mov_i64(v, cpu_X[reg]);
499 } else {
500 tcg_gen_ext32u_i64(v, cpu_X[reg]);
501 }
502 return v;
503 }
504
505 /* We should have at some point before trying to access an FP register
506 * done the necessary access check, so assert that
507 * (a) we did the check and
508 * (b) we didn't then just plough ahead anyway if it failed.
509 * Print the instruction pattern in the abort message so we can figure
510 * out what we need to fix if a user encounters this problem in the wild.
511 */
512 static inline void assert_fp_access_checked(DisasContext *s)
513 {
514 #ifdef CONFIG_DEBUG_TCG
515 if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
516 fprintf(stderr, "target-arm: FP access check missing for "
517 "instruction 0x%08x\n", s->insn);
518 abort();
519 }
520 #endif
521 }
522
523 /* Return the offset into CPUARMState of an element of specified
524 * size, 'element' places in from the least significant end of
525 * the FP/vector register Qn.
526 */
527 static inline int vec_reg_offset(DisasContext *s, int regno,
528 int element, TCGMemOp size)
529 {
530 int offs = 0;
531 #ifdef HOST_WORDS_BIGENDIAN
532 /* This is complicated slightly because vfp.regs[2n] is
533 * still the low half and vfp.regs[2n+1] the high half
534 * of the 128 bit vector, even on big endian systems.
535 * Calculate the offset assuming a fully bigendian 128 bits,
536 * then XOR to account for the order of the two 64 bit halves.
537 */
538 offs += (16 - ((element + 1) * (1 << size)));
539 offs ^= 8;
540 #else
541 offs += element * (1 << size);
542 #endif
543 offs += offsetof(CPUARMState, vfp.regs[regno * 2]);
544 assert_fp_access_checked(s);
545 return offs;
546 }
547
548 /* Return the offset into CPUARMState of a slice (from
549 * the least significant end) of FP register Qn (ie
550 * Dn, Sn, Hn or Bn).
551 * (Note that this is not the same mapping as for A32; see cpu.h)
552 */
553 static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
554 {
555 int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
556 #ifdef HOST_WORDS_BIGENDIAN
557 offs += (8 - (1 << size));
558 #endif
559 assert_fp_access_checked(s);
560 return offs;
561 }
562
563 /* Offset of the high half of the 128 bit vector Qn */
564 static inline int fp_reg_hi_offset(DisasContext *s, int regno)
565 {
566 assert_fp_access_checked(s);
567 return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
568 }
569
570 /* Convenience accessors for reading and writing single and double
571 * FP registers. Writing clears the upper parts of the associated
572 * 128 bit vector register, as required by the architecture.
573 * Note that unlike the GP register accessors, the values returned
574 * by the read functions must be manually freed.
575 */
576 static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
577 {
578 TCGv_i64 v = tcg_temp_new_i64();
579
580 tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
581 return v;
582 }
583
584 static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
585 {
586 TCGv_i32 v = tcg_temp_new_i32();
587
588 tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
589 return v;
590 }
591
592 static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
593 {
594 TCGv_i64 tcg_zero = tcg_const_i64(0);
595
596 tcg_gen_st_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
597 tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(s, reg));
598 tcg_temp_free_i64(tcg_zero);
599 }
600
601 static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
602 {
603 TCGv_i64 tmp = tcg_temp_new_i64();
604
605 tcg_gen_extu_i32_i64(tmp, v);
606 write_fp_dreg(s, reg, tmp);
607 tcg_temp_free_i64(tmp);
608 }
609
610 static TCGv_ptr get_fpstatus_ptr(void)
611 {
612 TCGv_ptr statusptr = tcg_temp_new_ptr();
613 int offset;
614
615 /* In A64 all instructions (both FP and Neon) use the FPCR;
616 * there is no equivalent of the A32 Neon "standard FPSCR value"
617 * and all operations use vfp.fp_status.
618 */
619 offset = offsetof(CPUARMState, vfp.fp_status);
620 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
621 return statusptr;
622 }
623
624 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
625 * than the 32 bit equivalent.
626 */
627 static inline void gen_set_NZ64(TCGv_i64 result)
628 {
629 tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
630 tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
631 }
632
633 /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
634 static inline void gen_logic_CC(int sf, TCGv_i64 result)
635 {
636 if (sf) {
637 gen_set_NZ64(result);
638 } else {
639 tcg_gen_extrl_i64_i32(cpu_ZF, result);
640 tcg_gen_mov_i32(cpu_NF, cpu_ZF);
641 }
642 tcg_gen_movi_i32(cpu_CF, 0);
643 tcg_gen_movi_i32(cpu_VF, 0);
644 }
645
646 /* dest = T0 + T1; compute C, N, V and Z flags */
647 static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
648 {
649 if (sf) {
650 TCGv_i64 result, flag, tmp;
651 result = tcg_temp_new_i64();
652 flag = tcg_temp_new_i64();
653 tmp = tcg_temp_new_i64();
654
655 tcg_gen_movi_i64(tmp, 0);
656 tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
657
658 tcg_gen_extrl_i64_i32(cpu_CF, flag);
659
660 gen_set_NZ64(result);
661
662 tcg_gen_xor_i64(flag, result, t0);
663 tcg_gen_xor_i64(tmp, t0, t1);
664 tcg_gen_andc_i64(flag, flag, tmp);
665 tcg_temp_free_i64(tmp);
666 tcg_gen_extrh_i64_i32(cpu_VF, flag);
667
668 tcg_gen_mov_i64(dest, result);
669 tcg_temp_free_i64(result);
670 tcg_temp_free_i64(flag);
671 } else {
672 /* 32 bit arithmetic */
673 TCGv_i32 t0_32 = tcg_temp_new_i32();
674 TCGv_i32 t1_32 = tcg_temp_new_i32();
675 TCGv_i32 tmp = tcg_temp_new_i32();
676
677 tcg_gen_movi_i32(tmp, 0);
678 tcg_gen_extrl_i64_i32(t0_32, t0);
679 tcg_gen_extrl_i64_i32(t1_32, t1);
680 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
681 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
682 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
683 tcg_gen_xor_i32(tmp, t0_32, t1_32);
684 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
685 tcg_gen_extu_i32_i64(dest, cpu_NF);
686
687 tcg_temp_free_i32(tmp);
688 tcg_temp_free_i32(t0_32);
689 tcg_temp_free_i32(t1_32);
690 }
691 }
692
693 /* dest = T0 - T1; compute C, N, V and Z flags */
694 static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
695 {
696 if (sf) {
697 /* 64 bit arithmetic */
698 TCGv_i64 result, flag, tmp;
699
700 result = tcg_temp_new_i64();
701 flag = tcg_temp_new_i64();
702 tcg_gen_sub_i64(result, t0, t1);
703
704 gen_set_NZ64(result);
705
706 tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
707 tcg_gen_extrl_i64_i32(cpu_CF, flag);
708
709 tcg_gen_xor_i64(flag, result, t0);
710 tmp = tcg_temp_new_i64();
711 tcg_gen_xor_i64(tmp, t0, t1);
712 tcg_gen_and_i64(flag, flag, tmp);
713 tcg_temp_free_i64(tmp);
714 tcg_gen_extrh_i64_i32(cpu_VF, flag);
715 tcg_gen_mov_i64(dest, result);
716 tcg_temp_free_i64(flag);
717 tcg_temp_free_i64(result);
718 } else {
719 /* 32 bit arithmetic */
720 TCGv_i32 t0_32 = tcg_temp_new_i32();
721 TCGv_i32 t1_32 = tcg_temp_new_i32();
722 TCGv_i32 tmp;
723
724 tcg_gen_extrl_i64_i32(t0_32, t0);
725 tcg_gen_extrl_i64_i32(t1_32, t1);
726 tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
727 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
728 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
729 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
730 tmp = tcg_temp_new_i32();
731 tcg_gen_xor_i32(tmp, t0_32, t1_32);
732 tcg_temp_free_i32(t0_32);
733 tcg_temp_free_i32(t1_32);
734 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
735 tcg_temp_free_i32(tmp);
736 tcg_gen_extu_i32_i64(dest, cpu_NF);
737 }
738 }
739
740 /* dest = T0 + T1 + CF; do not compute flags. */
741 static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
742 {
743 TCGv_i64 flag = tcg_temp_new_i64();
744 tcg_gen_extu_i32_i64(flag, cpu_CF);
745 tcg_gen_add_i64(dest, t0, t1);
746 tcg_gen_add_i64(dest, dest, flag);
747 tcg_temp_free_i64(flag);
748
749 if (!sf) {
750 tcg_gen_ext32u_i64(dest, dest);
751 }
752 }
753
754 /* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
755 static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
756 {
757 if (sf) {
758 TCGv_i64 result, cf_64, vf_64, tmp;
759 result = tcg_temp_new_i64();
760 cf_64 = tcg_temp_new_i64();
761 vf_64 = tcg_temp_new_i64();
762 tmp = tcg_const_i64(0);
763
764 tcg_gen_extu_i32_i64(cf_64, cpu_CF);
765 tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
766 tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
767 tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
768 gen_set_NZ64(result);
769
770 tcg_gen_xor_i64(vf_64, result, t0);
771 tcg_gen_xor_i64(tmp, t0, t1);
772 tcg_gen_andc_i64(vf_64, vf_64, tmp);
773 tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
774
775 tcg_gen_mov_i64(dest, result);
776
777 tcg_temp_free_i64(tmp);
778 tcg_temp_free_i64(vf_64);
779 tcg_temp_free_i64(cf_64);
780 tcg_temp_free_i64(result);
781 } else {
782 TCGv_i32 t0_32, t1_32, tmp;
783 t0_32 = tcg_temp_new_i32();
784 t1_32 = tcg_temp_new_i32();
785 tmp = tcg_const_i32(0);
786
787 tcg_gen_extrl_i64_i32(t0_32, t0);
788 tcg_gen_extrl_i64_i32(t1_32, t1);
789 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
790 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
791
792 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
793 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
794 tcg_gen_xor_i32(tmp, t0_32, t1_32);
795 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
796 tcg_gen_extu_i32_i64(dest, cpu_NF);
797
798 tcg_temp_free_i32(tmp);
799 tcg_temp_free_i32(t1_32);
800 tcg_temp_free_i32(t0_32);
801 }
802 }
803
804 /*
805 * Load/Store generators
806 */
807
808 /*
809 * Store from GPR register to memory.
810 */
811 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
812 TCGv_i64 tcg_addr, int size, int memidx,
813 bool iss_valid,
814 unsigned int iss_srt,
815 bool iss_sf, bool iss_ar)
816 {
817 g_assert(size <= 3);
818 tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
819
820 if (iss_valid) {
821 uint32_t syn;
822
823 syn = syn_data_abort_with_iss(0,
824 size,
825 false,
826 iss_srt,
827 iss_sf,
828 iss_ar,
829 0, 0, 0, 0, 0, false);
830 disas_set_insn_syndrome(s, syn);
831 }
832 }
833
834 static void do_gpr_st(DisasContext *s, TCGv_i64 source,
835 TCGv_i64 tcg_addr, int size,
836 bool iss_valid,
837 unsigned int iss_srt,
838 bool iss_sf, bool iss_ar)
839 {
840 do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
841 iss_valid, iss_srt, iss_sf, iss_ar);
842 }
843
844 /*
845 * Load from memory to GPR register
846 */
847 static void do_gpr_ld_memidx(DisasContext *s,
848 TCGv_i64 dest, TCGv_i64 tcg_addr,
849 int size, bool is_signed,
850 bool extend, int memidx,
851 bool iss_valid, unsigned int iss_srt,
852 bool iss_sf, bool iss_ar)
853 {
854 TCGMemOp memop = s->be_data + size;
855
856 g_assert(size <= 3);
857
858 if (is_signed) {
859 memop += MO_SIGN;
860 }
861
862 tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
863
864 if (extend && is_signed) {
865 g_assert(size < 3);
866 tcg_gen_ext32u_i64(dest, dest);
867 }
868
869 if (iss_valid) {
870 uint32_t syn;
871
872 syn = syn_data_abort_with_iss(0,
873 size,
874 is_signed,
875 iss_srt,
876 iss_sf,
877 iss_ar,
878 0, 0, 0, 0, 0, false);
879 disas_set_insn_syndrome(s, syn);
880 }
881 }
882
883 static void do_gpr_ld(DisasContext *s,
884 TCGv_i64 dest, TCGv_i64 tcg_addr,
885 int size, bool is_signed, bool extend,
886 bool iss_valid, unsigned int iss_srt,
887 bool iss_sf, bool iss_ar)
888 {
889 do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
890 get_mem_index(s),
891 iss_valid, iss_srt, iss_sf, iss_ar);
892 }
893
894 /*
895 * Store from FP register to memory
896 */
897 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
898 {
899 /* This writes the bottom N bits of a 128 bit wide vector to memory */
900 TCGv_i64 tmp = tcg_temp_new_i64();
901 tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
902 if (size < 4) {
903 tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
904 s->be_data + size);
905 } else {
906 bool be = s->be_data == MO_BE;
907 TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
908
909 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
910 tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
911 s->be_data | MO_Q);
912 tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
913 tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
914 s->be_data | MO_Q);
915 tcg_temp_free_i64(tcg_hiaddr);
916 }
917
918 tcg_temp_free_i64(tmp);
919 }
920
921 /*
922 * Load from memory to FP register
923 */
924 static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
925 {
926 /* This always zero-extends and writes to a full 128 bit wide vector */
927 TCGv_i64 tmplo = tcg_temp_new_i64();
928 TCGv_i64 tmphi;
929
930 if (size < 4) {
931 TCGMemOp memop = s->be_data + size;
932 tmphi = tcg_const_i64(0);
933 tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
934 } else {
935 bool be = s->be_data == MO_BE;
936 TCGv_i64 tcg_hiaddr;
937
938 tmphi = tcg_temp_new_i64();
939 tcg_hiaddr = tcg_temp_new_i64();
940
941 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
942 tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
943 s->be_data | MO_Q);
944 tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
945 s->be_data | MO_Q);
946 tcg_temp_free_i64(tcg_hiaddr);
947 }
948
949 tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
950 tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
951
952 tcg_temp_free_i64(tmplo);
953 tcg_temp_free_i64(tmphi);
954 }
955
956 /*
957 * Vector load/store helpers.
958 *
959 * The principal difference between this and a FP load is that we don't
960 * zero extend as we are filling a partial chunk of the vector register.
961 * These functions don't support 128 bit loads/stores, which would be
962 * normal load/store operations.
963 *
964 * The _i32 versions are useful when operating on 32 bit quantities
965 * (eg for floating point single or using Neon helper functions).
966 */
967
968 /* Get value of an element within a vector register */
969 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
970 int element, TCGMemOp memop)
971 {
972 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
973 switch (memop) {
974 case MO_8:
975 tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
976 break;
977 case MO_16:
978 tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
979 break;
980 case MO_32:
981 tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
982 break;
983 case MO_8|MO_SIGN:
984 tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
985 break;
986 case MO_16|MO_SIGN:
987 tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
988 break;
989 case MO_32|MO_SIGN:
990 tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
991 break;
992 case MO_64:
993 case MO_64|MO_SIGN:
994 tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
995 break;
996 default:
997 g_assert_not_reached();
998 }
999 }
1000
1001 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1002 int element, TCGMemOp memop)
1003 {
1004 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1005 switch (memop) {
1006 case MO_8:
1007 tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1008 break;
1009 case MO_16:
1010 tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1011 break;
1012 case MO_8|MO_SIGN:
1013 tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1014 break;
1015 case MO_16|MO_SIGN:
1016 tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1017 break;
1018 case MO_32:
1019 case MO_32|MO_SIGN:
1020 tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1021 break;
1022 default:
1023 g_assert_not_reached();
1024 }
1025 }
1026
1027 /* Set value of an element within a vector register */
1028 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1029 int element, TCGMemOp memop)
1030 {
1031 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1032 switch (memop) {
1033 case MO_8:
1034 tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1035 break;
1036 case MO_16:
1037 tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1038 break;
1039 case MO_32:
1040 tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1041 break;
1042 case MO_64:
1043 tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1044 break;
1045 default:
1046 g_assert_not_reached();
1047 }
1048 }
1049
1050 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1051 int destidx, int element, TCGMemOp memop)
1052 {
1053 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1054 switch (memop) {
1055 case MO_8:
1056 tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1057 break;
1058 case MO_16:
1059 tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1060 break;
1061 case MO_32:
1062 tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1063 break;
1064 default:
1065 g_assert_not_reached();
1066 }
1067 }
1068
1069 /* Clear the high 64 bits of a 128 bit vector (in general non-quad
1070 * vector ops all need to do this).
1071 */
1072 static void clear_vec_high(DisasContext *s, int rd)
1073 {
1074 TCGv_i64 tcg_zero = tcg_const_i64(0);
1075
1076 write_vec_element(s, tcg_zero, rd, 1, MO_64);
1077 tcg_temp_free_i64(tcg_zero);
1078 }
1079
1080 /* Store from vector register to memory */
1081 static void do_vec_st(DisasContext *s, int srcidx, int element,
1082 TCGv_i64 tcg_addr, int size)
1083 {
1084 TCGMemOp memop = s->be_data + size;
1085 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1086
1087 read_vec_element(s, tcg_tmp, srcidx, element, size);
1088 tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
1089
1090 tcg_temp_free_i64(tcg_tmp);
1091 }
1092
1093 /* Load from memory to vector register */
1094 static void do_vec_ld(DisasContext *s, int destidx, int element,
1095 TCGv_i64 tcg_addr, int size)
1096 {
1097 TCGMemOp memop = s->be_data + size;
1098 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1099
1100 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
1101 write_vec_element(s, tcg_tmp, destidx, element, size);
1102
1103 tcg_temp_free_i64(tcg_tmp);
1104 }
1105
1106 /* Check that FP/Neon access is enabled. If it is, return
1107 * true. If not, emit code to generate an appropriate exception,
1108 * and return false; the caller should not emit any code for
1109 * the instruction. Note that this check must happen after all
1110 * unallocated-encoding checks (otherwise the syndrome information
1111 * for the resulting exception will be incorrect).
1112 */
1113 static inline bool fp_access_check(DisasContext *s)
1114 {
1115 assert(!s->fp_access_checked);
1116 s->fp_access_checked = true;
1117
1118 if (!s->fp_excp_el) {
1119 return true;
1120 }
1121
1122 gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
1123 s->fp_excp_el);
1124 return false;
1125 }
1126
1127 /*
1128 * This utility function is for doing register extension with an
1129 * optional shift. You will likely want to pass a temporary for the
1130 * destination register. See DecodeRegExtend() in the ARM ARM.
1131 */
1132 static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1133 int option, unsigned int shift)
1134 {
1135 int extsize = extract32(option, 0, 2);
1136 bool is_signed = extract32(option, 2, 1);
1137
1138 if (is_signed) {
1139 switch (extsize) {
1140 case 0:
1141 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1142 break;
1143 case 1:
1144 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1145 break;
1146 case 2:
1147 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1148 break;
1149 case 3:
1150 tcg_gen_mov_i64(tcg_out, tcg_in);
1151 break;
1152 }
1153 } else {
1154 switch (extsize) {
1155 case 0:
1156 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1157 break;
1158 case 1:
1159 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1160 break;
1161 case 2:
1162 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1163 break;
1164 case 3:
1165 tcg_gen_mov_i64(tcg_out, tcg_in);
1166 break;
1167 }
1168 }
1169
1170 if (shift) {
1171 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1172 }
1173 }
1174
1175 static inline void gen_check_sp_alignment(DisasContext *s)
1176 {
1177 /* The AArch64 architecture mandates that (if enabled via PSTATE
1178 * or SCTLR bits) there is a check that SP is 16-aligned on every
1179 * SP-relative load or store (with an exception generated if it is not).
1180 * In line with general QEMU practice regarding misaligned accesses,
1181 * we omit these checks for the sake of guest program performance.
1182 * This function is provided as a hook so we can more easily add these
1183 * checks in future (possibly as a "favour catching guest program bugs
1184 * over speed" user selectable option).
1185 */
1186 }
1187
1188 /*
1189 * This provides a simple table based table lookup decoder. It is
1190 * intended to be used when the relevant bits for decode are too
1191 * awkwardly placed and switch/if based logic would be confusing and
1192 * deeply nested. Since it's a linear search through the table, tables
1193 * should be kept small.
1194 *
1195 * It returns the first handler where insn & mask == pattern, or
1196 * NULL if there is no match.
1197 * The table is terminated by an empty mask (i.e. 0)
1198 */
1199 static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1200 uint32_t insn)
1201 {
1202 const AArch64DecodeTable *tptr = table;
1203
1204 while (tptr->mask) {
1205 if ((insn & tptr->mask) == tptr->pattern) {
1206 return tptr->disas_fn;
1207 }
1208 tptr++;
1209 }
1210 return NULL;
1211 }
1212
1213 /*
1214 * the instruction disassembly implemented here matches
1215 * the instruction encoding classifications in chapter 3 (C3)
1216 * of the ARM Architecture Reference Manual (DDI0487A_a)
1217 */
1218
1219 /* C3.2.7 Unconditional branch (immediate)
1220 * 31 30 26 25 0
1221 * +----+-----------+-------------------------------------+
1222 * | op | 0 0 1 0 1 | imm26 |
1223 * +----+-----------+-------------------------------------+
1224 */
1225 static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
1226 {
1227 uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
1228
1229 if (insn & (1U << 31)) {
1230 /* C5.6.26 BL Branch with link */
1231 tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1232 }
1233
1234 /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
1235 gen_goto_tb(s, 0, addr);
1236 }
1237
1238 /* C3.2.1 Compare & branch (immediate)
1239 * 31 30 25 24 23 5 4 0
1240 * +----+-------------+----+---------------------+--------+
1241 * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
1242 * +----+-------------+----+---------------------+--------+
1243 */
1244 static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
1245 {
1246 unsigned int sf, op, rt;
1247 uint64_t addr;
1248 TCGLabel *label_match;
1249 TCGv_i64 tcg_cmp;
1250
1251 sf = extract32(insn, 31, 1);
1252 op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
1253 rt = extract32(insn, 0, 5);
1254 addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
1255
1256 tcg_cmp = read_cpu_reg(s, rt, sf);
1257 label_match = gen_new_label();
1258
1259 tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1260 tcg_cmp, 0, label_match);
1261
1262 gen_goto_tb(s, 0, s->pc);
1263 gen_set_label(label_match);
1264 gen_goto_tb(s, 1, addr);
1265 }
1266
1267 /* C3.2.5 Test & branch (immediate)
1268 * 31 30 25 24 23 19 18 5 4 0
1269 * +----+-------------+----+-------+-------------+------+
1270 * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
1271 * +----+-------------+----+-------+-------------+------+
1272 */
1273 static void disas_test_b_imm(DisasContext *s, uint32_t insn)
1274 {
1275 unsigned int bit_pos, op, rt;
1276 uint64_t addr;
1277 TCGLabel *label_match;
1278 TCGv_i64 tcg_cmp;
1279
1280 bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
1281 op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
1282 addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
1283 rt = extract32(insn, 0, 5);
1284
1285 tcg_cmp = tcg_temp_new_i64();
1286 tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
1287 label_match = gen_new_label();
1288 tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1289 tcg_cmp, 0, label_match);
1290 tcg_temp_free_i64(tcg_cmp);
1291 gen_goto_tb(s, 0, s->pc);
1292 gen_set_label(label_match);
1293 gen_goto_tb(s, 1, addr);
1294 }
1295
1296 /* C3.2.2 / C5.6.19 Conditional branch (immediate)
1297 * 31 25 24 23 5 4 3 0
1298 * +---------------+----+---------------------+----+------+
1299 * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
1300 * +---------------+----+---------------------+----+------+
1301 */
1302 static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
1303 {
1304 unsigned int cond;
1305 uint64_t addr;
1306
1307 if ((insn & (1 << 4)) || (insn & (1 << 24))) {
1308 unallocated_encoding(s);
1309 return;
1310 }
1311 addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
1312 cond = extract32(insn, 0, 4);
1313
1314 if (cond < 0x0e) {
1315 /* genuinely conditional branches */
1316 TCGLabel *label_match = gen_new_label();
1317 arm_gen_test_cc(cond, label_match);
1318 gen_goto_tb(s, 0, s->pc);
1319 gen_set_label(label_match);
1320 gen_goto_tb(s, 1, addr);
1321 } else {
1322 /* 0xe and 0xf are both "always" conditions */
1323 gen_goto_tb(s, 0, addr);
1324 }
1325 }
1326
1327 /* C5.6.68 HINT */
1328 static void handle_hint(DisasContext *s, uint32_t insn,
1329 unsigned int op1, unsigned int op2, unsigned int crm)
1330 {
1331 unsigned int selector = crm << 3 | op2;
1332
1333 if (op1 != 3) {
1334 unallocated_encoding(s);
1335 return;
1336 }
1337
1338 switch (selector) {
1339 case 0: /* NOP */
1340 return;
1341 case 3: /* WFI */
1342 s->is_jmp = DISAS_WFI;
1343 return;
1344 case 1: /* YIELD */
1345 s->is_jmp = DISAS_YIELD;
1346 return;
1347 case 2: /* WFE */
1348 s->is_jmp = DISAS_WFE;
1349 return;
1350 case 4: /* SEV */
1351 case 5: /* SEVL */
1352 /* we treat all as NOP at least for now */
1353 return;
1354 default:
1355 /* default specified as NOP equivalent */
1356 return;
1357 }
1358 }
1359
1360 static void gen_clrex(DisasContext *s, uint32_t insn)
1361 {
1362 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1363 }
1364
1365 /* CLREX, DSB, DMB, ISB */
1366 static void handle_sync(DisasContext *s, uint32_t insn,
1367 unsigned int op1, unsigned int op2, unsigned int crm)
1368 {
1369 TCGBar bar;
1370
1371 if (op1 != 3) {
1372 unallocated_encoding(s);
1373 return;
1374 }
1375
1376 switch (op2) {
1377 case 2: /* CLREX */
1378 gen_clrex(s, insn);
1379 return;
1380 case 4: /* DSB */
1381 case 5: /* DMB */
1382 switch (crm & 3) {
1383 case 1: /* MBReqTypes_Reads */
1384 bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1385 break;
1386 case 2: /* MBReqTypes_Writes */
1387 bar = TCG_BAR_SC | TCG_MO_ST_ST;
1388 break;
1389 default: /* MBReqTypes_All */
1390 bar = TCG_BAR_SC | TCG_MO_ALL;
1391 break;
1392 }
1393 tcg_gen_mb(bar);
1394 return;
1395 case 6: /* ISB */
1396 /* We need to break the TB after this insn to execute
1397 * a self-modified code correctly and also to take
1398 * any pending interrupts immediately.
1399 */
1400 s->is_jmp = DISAS_UPDATE;
1401 return;
1402 default:
1403 unallocated_encoding(s);
1404 return;
1405 }
1406 }
1407
1408 /* C5.6.130 MSR (immediate) - move immediate to processor state field */
1409 static void handle_msr_i(DisasContext *s, uint32_t insn,
1410 unsigned int op1, unsigned int op2, unsigned int crm)
1411 {
1412 int op = op1 << 3 | op2;
1413 switch (op) {
1414 case 0x05: /* SPSel */
1415 if (s->current_el == 0) {
1416 unallocated_encoding(s);
1417 return;
1418 }
1419 /* fall through */
1420 case 0x1e: /* DAIFSet */
1421 case 0x1f: /* DAIFClear */
1422 {
1423 TCGv_i32 tcg_imm = tcg_const_i32(crm);
1424 TCGv_i32 tcg_op = tcg_const_i32(op);
1425 gen_a64_set_pc_im(s->pc - 4);
1426 gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
1427 tcg_temp_free_i32(tcg_imm);
1428 tcg_temp_free_i32(tcg_op);
1429 s->is_jmp = DISAS_UPDATE;
1430 break;
1431 }
1432 default:
1433 unallocated_encoding(s);
1434 return;
1435 }
1436 }
1437
1438 static void gen_get_nzcv(TCGv_i64 tcg_rt)
1439 {
1440 TCGv_i32 tmp = tcg_temp_new_i32();
1441 TCGv_i32 nzcv = tcg_temp_new_i32();
1442
1443 /* build bit 31, N */
1444 tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
1445 /* build bit 30, Z */
1446 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
1447 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
1448 /* build bit 29, C */
1449 tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
1450 /* build bit 28, V */
1451 tcg_gen_shri_i32(tmp, cpu_VF, 31);
1452 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
1453 /* generate result */
1454 tcg_gen_extu_i32_i64(tcg_rt, nzcv);
1455
1456 tcg_temp_free_i32(nzcv);
1457 tcg_temp_free_i32(tmp);
1458 }
1459
1460 static void gen_set_nzcv(TCGv_i64 tcg_rt)
1461
1462 {
1463 TCGv_i32 nzcv = tcg_temp_new_i32();
1464
1465 /* take NZCV from R[t] */
1466 tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1467
1468 /* bit 31, N */
1469 tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1470 /* bit 30, Z */
1471 tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1472 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1473 /* bit 29, C */
1474 tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1475 tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1476 /* bit 28, V */
1477 tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
1478 tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1479 tcg_temp_free_i32(nzcv);
1480 }
1481
1482 /* C5.6.129 MRS - move from system register
1483 * C5.6.131 MSR (register) - move to system register
1484 * C5.6.204 SYS
1485 * C5.6.205 SYSL
1486 * These are all essentially the same insn in 'read' and 'write'
1487 * versions, with varying op0 fields.
1488 */
1489 static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
1490 unsigned int op0, unsigned int op1, unsigned int op2,
1491 unsigned int crn, unsigned int crm, unsigned int rt)
1492 {
1493 const ARMCPRegInfo *ri;
1494 TCGv_i64 tcg_rt;
1495
1496 ri = get_arm_cp_reginfo(s->cp_regs,
1497 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1498 crn, crm, op0, op1, op2));
1499
1500 if (!ri) {
1501 /* Unknown register; this might be a guest error or a QEMU
1502 * unimplemented feature.
1503 */
1504 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
1505 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
1506 isread ? "read" : "write", op0, op1, crn, crm, op2);
1507 unallocated_encoding(s);
1508 return;
1509 }
1510
1511 /* Check access permissions */
1512 if (!cp_access_ok(s->current_el, ri, isread)) {
1513 unallocated_encoding(s);
1514 return;
1515 }
1516
1517 if (ri->accessfn) {
1518 /* Emit code to perform further access permissions checks at
1519 * runtime; this may result in an exception.
1520 */
1521 TCGv_ptr tmpptr;
1522 TCGv_i32 tcg_syn, tcg_isread;
1523 uint32_t syndrome;
1524
1525 gen_a64_set_pc_im(s->pc - 4);
1526 tmpptr = tcg_const_ptr(ri);
1527 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1528 tcg_syn = tcg_const_i32(syndrome);
1529 tcg_isread = tcg_const_i32(isread);
1530 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
1531 tcg_temp_free_ptr(tmpptr);
1532 tcg_temp_free_i32(tcg_syn);
1533 tcg_temp_free_i32(tcg_isread);
1534 }
1535
1536 /* Handle special cases first */
1537 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
1538 case ARM_CP_NOP:
1539 return;
1540 case ARM_CP_NZCV:
1541 tcg_rt = cpu_reg(s, rt);
1542 if (isread) {
1543 gen_get_nzcv(tcg_rt);
1544 } else {
1545 gen_set_nzcv(tcg_rt);
1546 }
1547 return;
1548 case ARM_CP_CURRENTEL:
1549 /* Reads as current EL value from pstate, which is
1550 * guaranteed to be constant by the tb flags.
1551 */
1552 tcg_rt = cpu_reg(s, rt);
1553 tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
1554 return;
1555 case ARM_CP_DC_ZVA:
1556 /* Writes clear the aligned block of memory which rt points into. */
1557 tcg_rt = cpu_reg(s, rt);
1558 gen_helper_dc_zva(cpu_env, tcg_rt);
1559 return;
1560 default:
1561 break;
1562 }
1563
1564 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1565 gen_io_start();
1566 }
1567
1568 tcg_rt = cpu_reg(s, rt);
1569
1570 if (isread) {
1571 if (ri->type & ARM_CP_CONST) {
1572 tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
1573 } else if (ri->readfn) {
1574 TCGv_ptr tmpptr;
1575 tmpptr = tcg_const_ptr(ri);
1576 gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
1577 tcg_temp_free_ptr(tmpptr);
1578 } else {
1579 tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
1580 }
1581 } else {
1582 if (ri->type & ARM_CP_CONST) {
1583 /* If not forbidden by access permissions, treat as WI */
1584 return;
1585 } else if (ri->writefn) {
1586 TCGv_ptr tmpptr;
1587 tmpptr = tcg_const_ptr(ri);
1588 gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
1589 tcg_temp_free_ptr(tmpptr);
1590 } else {
1591 tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
1592 }
1593 }
1594
1595 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1596 /* I/O operations must end the TB here (whether read or write) */
1597 gen_io_end();
1598 s->is_jmp = DISAS_UPDATE;
1599 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
1600 /* We default to ending the TB on a coprocessor register write,
1601 * but allow this to be suppressed by the register definition
1602 * (usually only necessary to work around guest bugs).
1603 */
1604 s->is_jmp = DISAS_UPDATE;
1605 }
1606 }
1607
1608 /* C3.2.4 System
1609 * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
1610 * +---------------------+---+-----+-----+-------+-------+-----+------+
1611 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
1612 * +---------------------+---+-----+-----+-------+-------+-----+------+
1613 */
1614 static void disas_system(DisasContext *s, uint32_t insn)
1615 {
1616 unsigned int l, op0, op1, crn, crm, op2, rt;
1617 l = extract32(insn, 21, 1);
1618 op0 = extract32(insn, 19, 2);
1619 op1 = extract32(insn, 16, 3);
1620 crn = extract32(insn, 12, 4);
1621 crm = extract32(insn, 8, 4);
1622 op2 = extract32(insn, 5, 3);
1623 rt = extract32(insn, 0, 5);
1624
1625 if (op0 == 0) {
1626 if (l || rt != 31) {
1627 unallocated_encoding(s);
1628 return;
1629 }
1630 switch (crn) {
1631 case 2: /* C5.6.68 HINT */
1632 handle_hint(s, insn, op1, op2, crm);
1633 break;
1634 case 3: /* CLREX, DSB, DMB, ISB */
1635 handle_sync(s, insn, op1, op2, crm);
1636 break;
1637 case 4: /* C5.6.130 MSR (immediate) */
1638 handle_msr_i(s, insn, op1, op2, crm);
1639 break;
1640 default:
1641 unallocated_encoding(s);
1642 break;
1643 }
1644 return;
1645 }
1646 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1647 }
1648
1649 /* C3.2.3 Exception generation
1650 *
1651 * 31 24 23 21 20 5 4 2 1 0
1652 * +-----------------+-----+------------------------+-----+----+
1653 * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
1654 * +-----------------------+------------------------+----------+
1655 */
1656 static void disas_exc(DisasContext *s, uint32_t insn)
1657 {
1658 int opc = extract32(insn, 21, 3);
1659 int op2_ll = extract32(insn, 0, 5);
1660 int imm16 = extract32(insn, 5, 16);
1661 TCGv_i32 tmp;
1662
1663 switch (opc) {
1664 case 0:
1665 /* For SVC, HVC and SMC we advance the single-step state
1666 * machine before taking the exception. This is architecturally
1667 * mandated, to ensure that single-stepping a system call
1668 * instruction works properly.
1669 */
1670 switch (op2_ll) {
1671 case 1: /* SVC */
1672 gen_ss_advance(s);
1673 gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
1674 default_exception_el(s));
1675 break;
1676 case 2: /* HVC */
1677 if (s->current_el == 0) {
1678 unallocated_encoding(s);
1679 break;
1680 }
1681 /* The pre HVC helper handles cases when HVC gets trapped
1682 * as an undefined insn by runtime configuration.
1683 */
1684 gen_a64_set_pc_im(s->pc - 4);
1685 gen_helper_pre_hvc(cpu_env);
1686 gen_ss_advance(s);
1687 gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
1688 break;
1689 case 3: /* SMC */
1690 if (s->current_el == 0) {
1691 unallocated_encoding(s);
1692 break;
1693 }
1694 gen_a64_set_pc_im(s->pc - 4);
1695 tmp = tcg_const_i32(syn_aa64_smc(imm16));
1696 gen_helper_pre_smc(cpu_env, tmp);
1697 tcg_temp_free_i32(tmp);
1698 gen_ss_advance(s);
1699 gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
1700 break;
1701 default:
1702 unallocated_encoding(s);
1703 break;
1704 }
1705 break;
1706 case 1:
1707 if (op2_ll != 0) {
1708 unallocated_encoding(s);
1709 break;
1710 }
1711 /* BRK */
1712 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
1713 default_exception_el(s));
1714 break;
1715 case 2:
1716 if (op2_ll != 0) {
1717 unallocated_encoding(s);
1718 break;
1719 }
1720 /* HLT. This has two purposes.
1721 * Architecturally, it is an external halting debug instruction.
1722 * Since QEMU doesn't implement external debug, we treat this as
1723 * it is required for halting debug disabled: it will UNDEF.
1724 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
1725 */
1726 if (semihosting_enabled() && imm16 == 0xf000) {
1727 #ifndef CONFIG_USER_ONLY
1728 /* In system mode, don't allow userspace access to semihosting,
1729 * to provide some semblance of security (and for consistency
1730 * with our 32-bit semihosting).
1731 */
1732 if (s->current_el == 0) {
1733 unsupported_encoding(s, insn);
1734 break;
1735 }
1736 #endif
1737 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1738 } else {
1739 unsupported_encoding(s, insn);
1740 }
1741 break;
1742 case 5:
1743 if (op2_ll < 1 || op2_ll > 3) {
1744 unallocated_encoding(s);
1745 break;
1746 }
1747 /* DCPS1, DCPS2, DCPS3 */
1748 unsupported_encoding(s, insn);
1749 break;
1750 default:
1751 unallocated_encoding(s);
1752 break;
1753 }
1754 }
1755
1756 /* C3.2.7 Unconditional branch (register)
1757 * 31 25 24 21 20 16 15 10 9 5 4 0
1758 * +---------------+-------+-------+-------+------+-------+
1759 * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
1760 * +---------------+-------+-------+-------+------+-------+
1761 */
1762 static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1763 {
1764 unsigned int opc, op2, op3, rn, op4;
1765
1766 opc = extract32(insn, 21, 4);
1767 op2 = extract32(insn, 16, 5);
1768 op3 = extract32(insn, 10, 6);
1769 rn = extract32(insn, 5, 5);
1770 op4 = extract32(insn, 0, 5);
1771
1772 if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
1773 unallocated_encoding(s);
1774 return;
1775 }
1776
1777 switch (opc) {
1778 case 0: /* BR */
1779 case 1: /* BLR */
1780 case 2: /* RET */
1781 gen_a64_set_pc(s, cpu_reg(s, rn));
1782 /* BLR also needs to load return address */
1783 if (opc == 1) {
1784 tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1785 }
1786 break;
1787 case 4: /* ERET */
1788 if (s->current_el == 0) {
1789 unallocated_encoding(s);
1790 return;
1791 }
1792 gen_helper_exception_return(cpu_env);
1793 s->is_jmp = DISAS_JUMP;
1794 return;
1795 case 5: /* DRPS */
1796 if (rn != 0x1f) {
1797 unallocated_encoding(s);
1798 } else {
1799 unsupported_encoding(s, insn);
1800 }
1801 return;
1802 default:
1803 unallocated_encoding(s);
1804 return;
1805 }
1806
1807 s->is_jmp = DISAS_JUMP;
1808 }
1809
1810 /* C3.2 Branches, exception generating and system instructions */
1811 static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1812 {
1813 switch (extract32(insn, 25, 7)) {
1814 case 0x0a: case 0x0b:
1815 case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
1816 disas_uncond_b_imm(s, insn);
1817 break;
1818 case 0x1a: case 0x5a: /* Compare & branch (immediate) */
1819 disas_comp_b_imm(s, insn);
1820 break;
1821 case 0x1b: case 0x5b: /* Test & branch (immediate) */
1822 disas_test_b_imm(s, insn);
1823 break;
1824 case 0x2a: /* Conditional branch (immediate) */
1825 disas_cond_b_imm(s, insn);
1826 break;
1827 case 0x6a: /* Exception generation / System */
1828 if (insn & (1 << 24)) {
1829 disas_system(s, insn);
1830 } else {
1831 disas_exc(s, insn);
1832 }
1833 break;
1834 case 0x6b: /* Unconditional branch (register) */
1835 disas_uncond_b_reg(s, insn);
1836 break;
1837 default:
1838 unallocated_encoding(s);
1839 break;
1840 }
1841 }
1842
1843 /*
1844 * Load/Store exclusive instructions are implemented by remembering
1845 * the value/address loaded, and seeing if these are the same
1846 * when the store is performed. This is not actually the architecturally
1847 * mandated semantics, but it works for typical guest code sequences
1848 * and avoids having to monitor regular stores.
1849 *
1850 * The store exclusive uses the atomic cmpxchg primitives to avoid
1851 * races in multi-threaded linux-user and when MTTCG softmmu is
1852 * enabled.
1853 */
1854 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
1855 TCGv_i64 addr, int size, bool is_pair)
1856 {
1857 TCGv_i64 tmp = tcg_temp_new_i64();
1858 TCGMemOp memop = s->be_data + size;
1859
1860 g_assert(size <= 3);
1861 tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
1862
1863 if (is_pair) {
1864 TCGv_i64 addr2 = tcg_temp_new_i64();
1865 TCGv_i64 hitmp = tcg_temp_new_i64();
1866
1867 g_assert(size >= 2);
1868 tcg_gen_addi_i64(addr2, addr, 1 << size);
1869 tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
1870 tcg_temp_free_i64(addr2);
1871 tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
1872 tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
1873 tcg_temp_free_i64(hitmp);
1874 }
1875
1876 tcg_gen_mov_i64(cpu_exclusive_val, tmp);
1877 tcg_gen_mov_i64(cpu_reg(s, rt), tmp);
1878
1879 tcg_temp_free_i64(tmp);
1880 tcg_gen_mov_i64(cpu_exclusive_addr, addr);
1881 }
1882
1883 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
1884 TCGv_i64 inaddr, int size, int is_pair)
1885 {
1886 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
1887 * && (!is_pair || env->exclusive_high == [addr + datasize])) {
1888 * [addr] = {Rt};
1889 * if (is_pair) {
1890 * [addr + datasize] = {Rt2};
1891 * }
1892 * {Rd} = 0;
1893 * } else {
1894 * {Rd} = 1;
1895 * }
1896 * env->exclusive_addr = -1;
1897 */
1898 TCGLabel *fail_label = gen_new_label();
1899 TCGLabel *done_label = gen_new_label();
1900 TCGv_i64 addr = tcg_temp_local_new_i64();
1901 TCGv_i64 tmp;
1902
1903 /* Copy input into a local temp so it is not trashed when the
1904 * basic block ends at the branch insn.
1905 */
1906 tcg_gen_mov_i64(addr, inaddr);
1907 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
1908
1909 tmp = tcg_temp_new_i64();
1910 if (is_pair) {
1911 if (size == 2) {
1912 TCGv_i64 val = tcg_temp_new_i64();
1913 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
1914 tcg_gen_concat32_i64(val, cpu_exclusive_val, cpu_exclusive_high);
1915 tcg_gen_atomic_cmpxchg_i64(tmp, addr, val, tmp,
1916 get_mem_index(s),
1917 size | MO_ALIGN | s->be_data);
1918 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, val);
1919 tcg_temp_free_i64(val);
1920 } else if (s->be_data == MO_LE) {
1921 gen_helper_paired_cmpxchg64_le(tmp, cpu_env, addr, cpu_reg(s, rt),
1922 cpu_reg(s, rt2));
1923 } else {
1924 gen_helper_paired_cmpxchg64_be(tmp, cpu_env, addr, cpu_reg(s, rt),
1925 cpu_reg(s, rt2));
1926 }
1927 } else {
1928 TCGv_i64 val = cpu_reg(s, rt);
1929 tcg_gen_atomic_cmpxchg_i64(tmp, addr, cpu_exclusive_val, val,
1930 get_mem_index(s),
1931 size | MO_ALIGN | s->be_data);
1932 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
1933 }
1934
1935 tcg_temp_free_i64(addr);
1936
1937 tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
1938 tcg_temp_free_i64(tmp);
1939 tcg_gen_br(done_label);
1940
1941 gen_set_label(fail_label);
1942 tcg_gen_movi_i64(cpu_reg(s, rd), 1);
1943 gen_set_label(done_label);
1944 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1945 }
1946
1947 /* Compute the ISS.SF ("sixty-four") bit, i.e. whether the register
1948  * being transferred is 64 bits wide. This logic is derived from the
1949  * ARMv8 pseudocode for LDR ("Shared decode for all encodings"). */
1950 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
1951 {
1952 int opc0 = extract32(opc, 0, 1);
1953 int regsize;
1954
1955 if (is_signed) {
1956 regsize = opc0 ? 32 : 64;
1957 } else {
1958 regsize = size == 3 ? 64 : 32;
1959 }
1960 return regsize == 64;
1961 }
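/* For example (illustrative): LDRSW has opc<0> == 0 with is_signed set,
 * so regsize is 64 and the ISS SF bit is 1; a plain unsigned 32-bit LDR
 * (size == 2) yields regsize 32 and SF 0.
 */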
1962
1963 /* C3.3.6 Load/store exclusive
1964 *
1965 * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
1966 * +-----+-------------+----+---+----+------+----+-------+------+------+
1967 * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
1968 * +-----+-------------+----+---+----+------+----+-------+------+------+
1969 *
1970 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
1971 * L: 0 -> store, 1 -> load
1972 * o2: 0 -> exclusive, 1 -> not
1973 * o1: 0 -> single register, 1 -> register pair
1974 * o0: 1 -> load-acquire/store-release, 0 -> not
1975 */
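/* For example (an illustrative decode of the fields above):
 * LDAXR Xt, [Xn] has sz=11, o2=0, L=1, o1=0, o0=1: a 64-bit
 * single-register exclusive load-acquire.
 */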
1976 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
1977 {
1978 int rt = extract32(insn, 0, 5);
1979 int rn = extract32(insn, 5, 5);
1980 int rt2 = extract32(insn, 10, 5);
1981 int is_lasr = extract32(insn, 15, 1);
1982 int rs = extract32(insn, 16, 5);
1983 int is_pair = extract32(insn, 21, 1);
1984 int is_store = !extract32(insn, 22, 1);
1985 int is_excl = !extract32(insn, 23, 1);
1986 int size = extract32(insn, 30, 2);
1987 TCGv_i64 tcg_addr;
1988
1989 if ((!is_excl && !is_pair && !is_lasr) ||
1990 (!is_excl && is_pair) ||
1991 (is_pair && size < 2)) {
1992 unallocated_encoding(s);
1993 return;
1994 }
1995
1996 if (rn == 31) {
1997 gen_check_sp_alignment(s);
1998 }
1999 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2000
2001     /* Load-acquire/store-release ordering is provided by the explicit
2002      * tcg_gen_mb() barriers emitted alongside the accesses below, so no
2003      * further is_lasr handling is needed here.
     */
2004
2005 if (is_excl) {
2006 if (!is_store) {
2007 s->is_ldex = true;
2008 gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
2009 if (is_lasr) {
2010 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2011 }
2012 } else {
2013 if (is_lasr) {
2014 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2015 }
2016 gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
2017 }
2018 } else {
2019 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2020 bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);
2021
2022 /* Generate ISS for non-exclusive accesses including LASR. */
2023 if (is_store) {
2024 if (is_lasr) {
2025 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2026 }
2027 do_gpr_st(s, tcg_rt, tcg_addr, size,
2028 true, rt, iss_sf, is_lasr);
2029 } else {
2030 do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
2031 true, rt, iss_sf, is_lasr);
2032 if (is_lasr) {
2033 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2034 }
2035 }
2036 }
2037 }
2038
2039 /*
2040 * C3.3.5 Load register (literal)
2041 *
2042 * 31 30 29 27 26 25 24 23 5 4 0
2043 * +-----+-------+---+-----+-------------------+-------+
2044 * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
2045 * +-----+-------+---+-----+-------------------+-------+
2046 *
2047 * V: 1 -> vector (simd/fp)
2048 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2049  *                     10 -> 32 bit signed, 11 -> prefetch
2050 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2051 */
2052 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2053 {
2054 int rt = extract32(insn, 0, 5);
2055 int64_t imm = sextract32(insn, 5, 19) << 2;
2056 bool is_vector = extract32(insn, 26, 1);
2057 int opc = extract32(insn, 30, 2);
2058 bool is_signed = false;
2059 int size = 2;
2060 TCGv_i64 tcg_rt, tcg_addr;
2061
2062 if (is_vector) {
2063 if (opc == 3) {
2064 unallocated_encoding(s);
2065 return;
2066 }
2067 size = 2 + opc;
2068 if (!fp_access_check(s)) {
2069 return;
2070 }
2071 } else {
2072 if (opc == 3) {
2073 /* PRFM (literal) : prefetch */
2074 return;
2075 }
2076 size = 2 + extract32(opc, 0, 1);
2077 is_signed = extract32(opc, 1, 1);
2078 }
2079
2080 tcg_rt = cpu_reg(s, rt);
2081
2082 tcg_addr = tcg_const_i64((s->pc - 4) + imm);
2083 if (is_vector) {
2084 do_fp_ld(s, rt, tcg_addr, size);
2085 } else {
2086 /* Only unsigned 32bit loads target 32bit registers. */
2087 bool iss_sf = opc != 0;
2088
2089 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2090 true, rt, iss_sf, false);
2091 }
2092 tcg_temp_free_i64(tcg_addr);
2093 }
2094
2095 /*
2096 * C5.6.80 LDNP (Load Pair - non-temporal hint)
2097 * C5.6.81 LDP (Load Pair - non vector)
2098 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
2099 * C5.6.176 STNP (Store Pair - non-temporal hint)
2100 * C5.6.177 STP (Store Pair - non vector)
2101 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
2102 * C6.3.165 LDP (Load Pair of SIMD&FP)
2103 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
2104 * C6.3.284 STP (Store Pair of SIMD&FP)
2105 *
2106 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2107  * +-----+-------+---+---+-------+---+-------+-------+------+------+
2108 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2109 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2110 *
2111 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2112 * LDPSW 01
2113 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2114 * V: 0 -> GPR, 1 -> Vector
2115 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2116 * 10 -> signed offset, 11 -> pre-index
2117  * L: 0 -> Store, 1 -> Load
2118  *
2119  * Rt, Rt2 = GPR or SIMD registers to be transferred
2120 * Rn = general purpose register containing address
2121 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2122 */
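/* For example (illustrative): LDP Xt, Xt2, [Xn], #16 has opc=10, V=0,
 * index=01 (post-index), L=1 and imm7=2, since the byte offset 16 is
 * scaled down by the 8-byte element size.
 */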
2123 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2124 {
2125 int rt = extract32(insn, 0, 5);
2126 int rn = extract32(insn, 5, 5);
2127 int rt2 = extract32(insn, 10, 5);
2128 uint64_t offset = sextract64(insn, 15, 7);
2129 int index = extract32(insn, 23, 2);
2130 bool is_vector = extract32(insn, 26, 1);
2131 bool is_load = extract32(insn, 22, 1);
2132 int opc = extract32(insn, 30, 2);
2133
2134 bool is_signed = false;
2135 bool postindex = false;
2136 bool wback = false;
2137
2138 TCGv_i64 tcg_addr; /* calculated address */
2139 int size;
2140
2141 if (opc == 3) {
2142 unallocated_encoding(s);
2143 return;
2144 }
2145
2146 if (is_vector) {
2147 size = 2 + opc;
2148 } else {
2149 size = 2 + extract32(opc, 1, 1);
2150 is_signed = extract32(opc, 0, 1);
2151 if (!is_load && is_signed) {
2152 unallocated_encoding(s);
2153 return;
2154 }
2155 }
2156
2157 switch (index) {
2158 case 1: /* post-index */
2159 postindex = true;
2160 wback = true;
2161 break;
2162 case 0:
2163 /* signed offset with "non-temporal" hint. Since we don't emulate
2164 * caches we don't care about hints to the cache system about
2165 * data access patterns, and handle this identically to plain
2166 * signed offset.
2167 */
2168 if (is_signed) {
2169 /* There is no non-temporal-hint version of LDPSW */
2170 unallocated_encoding(s);
2171 return;
2172 }
2173 postindex = false;
2174 break;
2175 case 2: /* signed offset, rn not updated */
2176 postindex = false;
2177 break;
2178 case 3: /* pre-index */
2179 postindex = false;
2180 wback = true;
2181 break;
2182 }
2183
2184 if (is_vector && !fp_access_check(s)) {
2185 return;
2186 }
2187
2188 offset <<= size;
2189
2190 if (rn == 31) {
2191 gen_check_sp_alignment(s);
2192 }
2193
2194 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2195
2196 if (!postindex) {
2197 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2198 }
2199
2200 if (is_vector) {
2201 if (is_load) {
2202 do_fp_ld(s, rt, tcg_addr, size);
2203 } else {
2204 do_fp_st(s, rt, tcg_addr, size);
2205 }
2206 } else {
2207 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2208 if (is_load) {
2209 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2210 false, 0, false, false);
2211 } else {
2212 do_gpr_st(s, tcg_rt, tcg_addr, size,
2213 false, 0, false, false);
2214 }
2215 }
2216 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2217 if (is_vector) {
2218 if (is_load) {
2219 do_fp_ld(s, rt2, tcg_addr, size);
2220 } else {
2221 do_fp_st(s, rt2, tcg_addr, size);
2222 }
2223 } else {
2224 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2225 if (is_load) {
2226 do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
2227 false, 0, false, false);
2228 } else {
2229 do_gpr_st(s, tcg_rt2, tcg_addr, size,
2230 false, 0, false, false);
2231 }
2232 }
2233
2234 if (wback) {
2235 if (postindex) {
2236 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
2237 } else {
2238 tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
2239 }
2240 tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
2241 }
2242 }
2243
2244 /*
2245 * C3.3.8 Load/store (immediate post-indexed)
2246 * C3.3.9 Load/store (immediate pre-indexed)
2247 * C3.3.12 Load/store (unscaled immediate)
2248 *
2249 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
2250 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2251 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
2252 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2253 *
2254  * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback),
2255  *       10 -> unprivileged
2256 * V = 0 -> non-vector
2257 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2258 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2259 */
2260 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2261 int opc,
2262 int size,
2263 int rt,
2264 bool is_vector)
2265 {
2266 int rn = extract32(insn, 5, 5);
2267 int imm9 = sextract32(insn, 12, 9);
2268 int idx = extract32(insn, 10, 2);
2269 bool is_signed = false;
2270 bool is_store = false;
2271 bool is_extended = false;
2272 bool is_unpriv = (idx == 2);
2273 bool iss_valid = !is_vector;
2274 bool post_index;
2275 bool writeback;
2276
2277 TCGv_i64 tcg_addr;
2278
2279 if (is_vector) {
2280 size |= (opc & 2) << 1;
2281 if (size > 4 || is_unpriv) {
2282 unallocated_encoding(s);
2283 return;
2284 }
2285 is_store = ((opc & 1) == 0);
2286 if (!fp_access_check(s)) {
2287 return;
2288 }
2289 } else {
2290 if (size == 3 && opc == 2) {
2291 /* PRFM - prefetch */
2292 if (is_unpriv) {
2293 unallocated_encoding(s);
2294 return;
2295 }
2296 return;
2297 }
2298 if (opc == 3 && size > 1) {
2299 unallocated_encoding(s);
2300 return;
2301 }
2302 is_store = (opc == 0);
2303 is_signed = extract32(opc, 1, 1);
2304 is_extended = (size < 3) && extract32(opc, 0, 1);
2305 }
2306
2307 switch (idx) {
2308 case 0:
2309 case 2:
2310 post_index = false;
2311 writeback = false;
2312 break;
2313 case 1:
2314 post_index = true;
2315 writeback = true;
2316 break;
2317 case 3:
2318 post_index = false;
2319 writeback = true;
2320 break;
2321 }
2322
2323 if (rn == 31) {
2324 gen_check_sp_alignment(s);
2325 }
2326 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2327
2328 if (!post_index) {
2329 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2330 }
2331
2332 if (is_vector) {
2333 if (is_store) {
2334 do_fp_st(s, rt, tcg_addr, size);
2335 } else {
2336 do_fp_ld(s, rt, tcg_addr, size);
2337 }
2338 } else {
2339 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2340 int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2341 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2342
2343 if (is_store) {
2344 do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2345 iss_valid, rt, iss_sf, false);
2346 } else {
2347 do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2348 is_signed, is_extended, memidx,
2349 iss_valid, rt, iss_sf, false);
2350 }
2351 }
2352
2353 if (writeback) {
2354 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2355 if (post_index) {
2356 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2357 }
2358 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2359 }
2360 }
2361
2362 /*
2363 * C3.3.10 Load/store (register offset)
2364 *
2365 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2366 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2367 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
2368 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2369 *
2370 * For non-vector:
2371  * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2372 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2373 * For vector:
2374 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2375 * opc<0>: 0 -> store, 1 -> load
2376 * V: 1 -> vector/simd
2377 * opt: extend encoding (see DecodeRegExtend)
2378 * S: if S=1 then scale (essentially index by sizeof(size))
2379 * Rt: register to transfer into/out of
2380 * Rn: address register or SP for base
2381 * Rm: offset register or ZR for offset
2382 */
2383 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2384 int opc,
2385 int size,
2386 int rt,
2387 bool is_vector)
2388 {
2389 int rn = extract32(insn, 5, 5);
2390 int shift = extract32(insn, 12, 1);
2391 int rm = extract32(insn, 16, 5);
2392 int opt = extract32(insn, 13, 3);
2393 bool is_signed = false;
2394 bool is_store = false;
2395 bool is_extended = false;
2396
2397 TCGv_i64 tcg_rm;
2398 TCGv_i64 tcg_addr;
2399
2400 if (extract32(opt, 1, 1) == 0) {
2401 unallocated_encoding(s);
2402 return;
2403 }
2404
2405 if (is_vector) {
2406 size |= (opc & 2) << 1;
2407 if (size > 4) {
2408 unallocated_encoding(s);
2409 return;
2410 }
2411 is_store = !extract32(opc, 0, 1);
2412 if (!fp_access_check(s)) {
2413 return;
2414 }
2415 } else {
2416 if (size == 3 && opc == 2) {
2417 /* PRFM - prefetch */
2418 return;
2419 }
2420 if (opc == 3 && size > 1) {
2421 unallocated_encoding(s);
2422 return;
2423 }
2424 is_store = (opc == 0);
2425 is_signed = extract32(opc, 1, 1);
2426 is_extended = (size < 3) && extract32(opc, 0, 1);
2427 }
2428
2429 if (rn == 31) {
2430 gen_check_sp_alignment(s);
2431 }
2432 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2433
2434 tcg_rm = read_cpu_reg(s, rm, 1);
2435 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
2436
2437 tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
2438
2439 if (is_vector) {
2440 if (is_store) {
2441 do_fp_st(s, rt, tcg_addr, size);
2442 } else {
2443 do_fp_ld(s, rt, tcg_addr, size);
2444 }
2445 } else {
2446 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2447 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2448 if (is_store) {
2449 do_gpr_st(s, tcg_rt, tcg_addr, size,
2450 true, rt, iss_sf, false);
2451 } else {
2452 do_gpr_ld(s, tcg_rt, tcg_addr, size,
2453 is_signed, is_extended,
2454 true, rt, iss_sf, false);
2455 }
2456 }
2457 }
2458
2459 /*
2460 * C3.3.13 Load/store (unsigned immediate)
2461 *
2462  * 31 30 29     27  26 25 24 23 22 21        10 9     5  4    0
2463 * +----+-------+---+-----+-----+------------+-------+------+
2464 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
2465 * +----+-------+---+-----+-----+------------+-------+------+
2466 *
2467 * For non-vector:
2468  * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2469 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2470 * For vector:
2471 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2472 * opc<0>: 0 -> store, 1 -> load
2473 * Rn: base address register (inc SP)
2474 * Rt: target register
2475 */
2476 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
2477 int opc,
2478 int size,
2479 int rt,
2480 bool is_vector)
2481 {
2482 int rn = extract32(insn, 5, 5);
2483 unsigned int imm12 = extract32(insn, 10, 12);
2484 unsigned int offset;
2485
2486 TCGv_i64 tcg_addr;
2487
2488 bool is_store;
2489 bool is_signed = false;
2490 bool is_extended = false;
2491
2492 if (is_vector) {
2493 size |= (opc & 2) << 1;
2494 if (size > 4) {
2495 unallocated_encoding(s);
2496 return;
2497 }
2498 is_store = !extract32(opc, 0, 1);
2499 if (!fp_access_check(s)) {
2500 return;
2501 }
2502 } else {
2503 if (size == 3 && opc == 2) {
2504 /* PRFM - prefetch */
2505 return;
2506 }
2507 if (opc == 3 && size > 1) {
2508 unallocated_encoding(s);
2509 return;
2510 }
2511 is_store = (opc == 0);
2512 is_signed = extract32(opc, 1, 1);
2513 is_extended = (size < 3) && extract32(opc, 0, 1);
2514 }
2515
2516 if (rn == 31) {
2517 gen_check_sp_alignment(s);
2518 }
2519 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2520 offset = imm12 << size;
2521 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2522
2523 if (is_vector) {
2524 if (is_store) {
2525 do_fp_st(s, rt, tcg_addr, size);
2526 } else {
2527 do_fp_ld(s, rt, tcg_addr, size);
2528 }
2529 } else {
2530 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2531 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2532 if (is_store) {
2533 do_gpr_st(s, tcg_rt, tcg_addr, size,
2534 true, rt, iss_sf, false);
2535 } else {
2536 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
2537 true, rt, iss_sf, false);
2538 }
2539 }
2540 }
2541
2542 /* Load/store register (all forms) */
2543 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2544 {
2545 int rt = extract32(insn, 0, 5);
2546 int opc = extract32(insn, 22, 2);
2547 bool is_vector = extract32(insn, 26, 1);
2548 int size = extract32(insn, 30, 2);
2549
2550 switch (extract32(insn, 24, 2)) {
2551 case 0:
2552 if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
2553 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2554 } else {
2555 /* Load/store register (unscaled immediate)
2556 * Load/store immediate pre/post-indexed
2557 * Load/store register unprivileged
2558 */
2559 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2560 }
2561 break;
2562 case 1:
2563 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2564 break;
2565 default:
2566 unallocated_encoding(s);
2567 break;
2568 }
2569 }
2570
2571 /* C3.3.1 AdvSIMD load/store multiple structures
2572 *
2573 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
2574 * +---+---+---------------+---+-------------+--------+------+------+------+
2575 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
2576 * +---+---+---------------+---+-------------+--------+------+------+------+
2577 *
2578 * C3.3.2 AdvSIMD load/store multiple structures (post-indexed)
2579 *
2580 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
2581 * +---+---+---------------+---+---+---------+--------+------+------+------+
2582 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
2583 * +---+---+---------------+---+---+---------+--------+------+------+------+
2584 *
2585 * Rt: first (or only) SIMD&FP register to be transferred
2586 * Rn: base address or SP
2587 * Rm (post-index only): post-index register (when !31) or size dependent #imm
2588 */
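/* For example (illustrative): LD4 {v0.16b-v3.16b}, [x0] has Q=1, L=1,
 * opcode=0000 (rpt=1, selem=4), size=00: one pass loading 4-element
 * structures into four consecutive registers.
 */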
2589 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
2590 {
2591 int rt = extract32(insn, 0, 5);
2592 int rn = extract32(insn, 5, 5);
2593 int size = extract32(insn, 10, 2);
2594 int opcode = extract32(insn, 12, 4);
2595 bool is_store = !extract32(insn, 22, 1);
2596 bool is_postidx = extract32(insn, 23, 1);
2597 bool is_q = extract32(insn, 30, 1);
2598 TCGv_i64 tcg_addr, tcg_rn;
2599
2600 int ebytes = 1 << size;
2601 int elements = (is_q ? 128 : 64) / (8 << size);
2602 int rpt; /* num iterations */
2603 int selem; /* structure elements */
2604 int r;
2605
2606 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
2607 unallocated_encoding(s);
2608 return;
2609 }
2610
2611 /* From the shared decode logic */
2612 switch (opcode) {
2613 case 0x0:
2614 rpt = 1;
2615 selem = 4;
2616 break;
2617 case 0x2:
2618 rpt = 4;
2619 selem = 1;
2620 break;
2621 case 0x4:
2622 rpt = 1;
2623 selem = 3;
2624 break;
2625 case 0x6:
2626 rpt = 3;
2627 selem = 1;
2628 break;
2629 case 0x7:
2630 rpt = 1;
2631 selem = 1;
2632 break;
2633 case 0x8:
2634 rpt = 1;
2635 selem = 2;
2636 break;
2637 case 0xa:
2638 rpt = 2;
2639 selem = 1;
2640 break;
2641 default:
2642 unallocated_encoding(s);
2643 return;
2644 }
2645
2646 if (size == 3 && !is_q && selem != 1) {
2647 /* reserved */
2648 unallocated_encoding(s);
2649 return;
2650 }
2651
2652 if (!fp_access_check(s)) {
2653 return;
2654 }
2655
2656 if (rn == 31) {
2657 gen_check_sp_alignment(s);
2658 }
2659
2660 tcg_rn = cpu_reg_sp(s, rn);
2661 tcg_addr = tcg_temp_new_i64();
2662 tcg_gen_mov_i64(tcg_addr, tcg_rn);
2663
2664 for (r = 0; r < rpt; r++) {
2665 int e;
2666 for (e = 0; e < elements; e++) {
2667 int tt = (rt + r) % 32;
2668 int xs;
2669 for (xs = 0; xs < selem; xs++) {
2670 if (is_store) {
2671 do_vec_st(s, tt, e, tcg_addr, size);
2672 } else {
2673 do_vec_ld(s, tt, e, tcg_addr, size);
2674
2675 /* For non-quad operations, setting a slice of the low
2676 * 64 bits of the register clears the high 64 bits (in
2677 * the ARM ARM pseudocode this is implicit in the fact
2678 * that 'rval' is a 64 bit wide variable). We optimize
2679 * by noticing that we only need to do this the first
2680 * time we touch a register.
2681 */
2682 if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
2683 clear_vec_high(s, tt);
2684 }
2685 }
2686 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2687 tt = (tt + 1) % 32;
2688 }
2689 }
2690 }
2691
2692 if (is_postidx) {
2693 int rm = extract32(insn, 16, 5);
2694 if (rm == 31) {
2695 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2696 } else {
2697 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2698 }
2699 }
2700 tcg_temp_free_i64(tcg_addr);
2701 }
2702
2703 /* C3.3.3 AdvSIMD load/store single structure
2704 *
2705 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2706 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2707 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
2708 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2709 *
2710 * C3.3.4 AdvSIMD load/store single structure (post-indexed)
2711 *
2712 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2713 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2714 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
2715 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2716 *
2717 * Rt: first (or only) SIMD&FP register to be transferred
2718 * Rn: base address or SP
2719 * Rm (post-index only): post-index register (when !31) or size dependent #imm
2720 * index = encoded in Q:S:size dependent on size
2721 *
2722 * lane_size = encoded in R, opc
2723 * transfer width = encoded in opc, S, size
2724 */
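/* For example (illustrative): LD1R {v0.4s}, [x0] (load one element and
 * replicate) has L=1, R=0, opc=110, S=0, size=10: the initial scale of 3
 * (opc<2:1>) selects the replicate path below, after which scale becomes
 * size (2), so ebytes is 4 and mulconst is 0x0000000100000001.
 */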
2725 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
2726 {
2727 int rt = extract32(insn, 0, 5);
2728 int rn = extract32(insn, 5, 5);
2729 int size = extract32(insn, 10, 2);
2730 int S = extract32(insn, 12, 1);
2731 int opc = extract32(insn, 13, 3);
2732 int R = extract32(insn, 21, 1);
2733 int is_load = extract32(insn, 22, 1);
2734 int is_postidx = extract32(insn, 23, 1);
2735 int is_q = extract32(insn, 30, 1);
2736
2737 int scale = extract32(opc, 1, 2);
2738 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
2739 bool replicate = false;
2740 int index = is_q << 3 | S << 2 | size;
2741 int ebytes, xs;
2742 TCGv_i64 tcg_addr, tcg_rn;
2743
2744 switch (scale) {
2745 case 3:
2746 if (!is_load || S) {
2747 unallocated_encoding(s);
2748 return;
2749 }
2750 scale = size;
2751 replicate = true;
2752 break;
2753 case 0:
2754 break;
2755 case 1:
2756 if (extract32(size, 0, 1)) {
2757 unallocated_encoding(s);
2758 return;
2759 }
2760 index >>= 1;
2761 break;
2762 case 2:
2763 if (extract32(size, 1, 1)) {
2764 unallocated_encoding(s);
2765 return;
2766 }
2767 if (!extract32(size, 0, 1)) {
2768 index >>= 2;
2769 } else {
2770 if (S) {
2771 unallocated_encoding(s);
2772 return;
2773 }
2774 index >>= 3;
2775 scale = 3;
2776 }
2777 break;
2778 default:
2779 g_assert_not_reached();
2780 }
2781
2782 if (!fp_access_check(s)) {
2783 return;
2784 }
2785
2786 ebytes = 1 << scale;
2787
2788 if (rn == 31) {
2789 gen_check_sp_alignment(s);
2790 }
2791
2792 tcg_rn = cpu_reg_sp(s, rn);
2793 tcg_addr = tcg_temp_new_i64();
2794 tcg_gen_mov_i64(tcg_addr, tcg_rn);
2795
2796 for (xs = 0; xs < selem; xs++) {
2797 if (replicate) {
2798 /* Load and replicate to all elements */
2799 uint64_t mulconst;
2800 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2801
2802 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
2803 get_mem_index(s), s->be_data + scale);
2804 switch (scale) {
2805 case 0:
2806 mulconst = 0x0101010101010101ULL;
2807 break;
2808 case 1:
2809 mulconst = 0x0001000100010001ULL;
2810 break;
2811 case 2:
2812 mulconst = 0x0000000100000001ULL;
2813 break;
2814 case 3:
2815 mulconst = 0;
2816 break;
2817 default:
2818 g_assert_not_reached();
2819 }
2820 if (mulconst) {
2821 tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
2822 }
2823 write_vec_element(s, tcg_tmp, rt, 0, MO_64);
2824 if (is_q) {
2825 write_vec_element(s, tcg_tmp, rt, 1, MO_64);
2826 } else {
2827 clear_vec_high(s, rt);
2828 }
2829 tcg_temp_free_i64(tcg_tmp);
2830 } else {
2831 /* Load/store one element per register */
2832 if (is_load) {
2833 do_vec_ld(s, rt, index, tcg_addr, scale);
2834 } else {
2835 do_vec_st(s, rt, index, tcg_addr, scale);
2836 }
2837 }
2838 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2839 rt = (rt + 1) % 32;
2840 }
2841
2842 if (is_postidx) {
2843 int rm = extract32(insn, 16, 5);
2844 if (rm == 31) {
2845 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2846 } else {
2847 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2848 }
2849 }
2850 tcg_temp_free_i64(tcg_addr);
2851 }
2852
2853 /* C3.3 Loads and stores */
2854 static void disas_ldst(DisasContext *s, uint32_t insn)
2855 {
2856 switch (extract32(insn, 24, 6)) {
2857 case 0x08: /* Load/store exclusive */
2858 disas_ldst_excl(s, insn);
2859 break;
2860 case 0x18: case 0x1c: /* Load register (literal) */
2861 disas_ld_lit(s, insn);
2862 break;
2863 case 0x28: case 0x29:
2864 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
2865 disas_ldst_pair(s, insn);
2866 break;
2867 case 0x38: case 0x39:
2868 case 0x3c: case 0x3d: /* Load/store register (all forms) */
2869 disas_ldst_reg(s, insn);
2870 break;
2871 case 0x0c: /* AdvSIMD load/store multiple structures */
2872 disas_ldst_multiple_struct(s, insn);
2873 break;
2874 case 0x0d: /* AdvSIMD load/store single structure */
2875 disas_ldst_single_struct(s, insn);
2876 break;
2877 default:
2878 unallocated_encoding(s);
2879 break;
2880 }
2881 }
2882
2883 /* C3.4.6 PC-rel. addressing
2884 * 31 30 29 28 24 23 5 4 0
2885 * +----+-------+-----------+-------------------+------+
2886 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
2887 * +----+-------+-----------+-------------------+------+
2888 */
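/* For example (illustrative): ADRP Xd, <label> (op=1) computes the 4KB
 * page form, Xd = (PC & ~0xfff) + (SignExtend(immhi:immlo) << 12), while
 * ADR (op=0) adds the byte offset to the instruction's own address.
 */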
2889 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
2890 {
2891 unsigned int page, rd;
2892 uint64_t base;
2893 uint64_t offset;
2894
2895 page = extract32(insn, 31, 1);
2896 /* SignExtend(immhi:immlo) -> offset */
2897 offset = sextract64(insn, 5, 19);
2898 offset = offset << 2 | extract32(insn, 29, 2);
2899 rd = extract32(insn, 0, 5);
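    /* s->pc has already been advanced past this insn, so the insn's own
     * address (the ADR/ADRP base) is s->pc - 4.
     */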
2900 base = s->pc - 4;
2901
2902 if (page) {
2903 /* ADRP (page based) */
2904 base &= ~0xfff;
2905 offset <<= 12;
2906 }
2907
2908 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
2909 }
2910
2911 /*
2912 * C3.4.1 Add/subtract (immediate)
2913 *
2914 * 31 30 29 28 24 23 22 21 10 9 5 4 0
2915 * +--+--+--+-----------+-----+-------------+-----+-----+
2916 * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
2917 * +--+--+--+-----------+-----+-------------+-----+-----+
2918 *
2919 * sf: 0 -> 32bit, 1 -> 64bit
2920 * op: 0 -> add , 1 -> sub
2921 * S: 1 -> set flags
2922 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
2923 */
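/* For example (illustrative): ADD Xd, Xn, #1, LSL #12 (shift=01) adds
 * 0x1000, while SUBS (op=1, S=1) also updates NZCV via gen_sub_CC().
 */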
2924 static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
2925 {
2926 int rd = extract32(insn, 0, 5);
2927 int rn = extract32(insn, 5, 5);
2928 uint64_t imm = extract32(insn, 10, 12);
2929 int shift = extract32(insn, 22, 2);
2930 bool setflags = extract32(insn, 29, 1);
2931 bool sub_op = extract32(insn, 30, 1);
2932 bool is_64bit = extract32(insn, 31, 1);
2933
2934 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2935 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
2936 TCGv_i64 tcg_result;
2937
2938 switch (shift) {
2939 case 0x0:
2940 break;
2941 case 0x1:
2942 imm <<= 12;
2943 break;
2944 default:
2945 unallocated_encoding(s);
2946 return;
2947 }
2948
2949 tcg_result = tcg_temp_new_i64();
2950 if (!setflags) {
2951 if (sub_op) {
2952 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
2953 } else {
2954 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
2955 }
2956 } else {
2957 TCGv_i64 tcg_imm = tcg_const_i64(imm);
2958 if (sub_op) {
2959 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
2960 } else {
2961 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
2962 }
2963 tcg_temp_free_i64(tcg_imm);
2964 }
2965
2966 if (is_64bit) {
2967 tcg_gen_mov_i64(tcg_rd, tcg_result);
2968 } else {
2969 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
2970 }
2971
2972 tcg_temp_free_i64(tcg_result);
2973 }
2974
2975 /* The input should be a value in the bottom e bits (with higher
2976 * bits zero); returns that value replicated into every element
2977 * of size e in a 64 bit integer.
2978 */
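/* For example (illustrative): bitfield_replicate(0x3, 4) doubles the
 * element width each pass (0x33, 0x3333, ...) and returns
 * 0x3333333333333333.
 */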
2979 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
2980 {
2981 assert(e != 0);
2982 while (e < 64) {
2983 mask |= mask << e;
2984 e *= 2;
2985 }
2986 return mask;
2987 }
2988
2989 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
2990 static inline uint64_t bitmask64(unsigned int length)
2991 {
2992 assert(length > 0 && length <= 64);
2993 return ~0ULL >> (64 - length);
2994 }
2995
2996 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
2997 * only require the wmask. Returns false if the imms/immr/immn are a reserved
2998 * value (ie should cause a guest UNDEF exception), and true if they are
2999 * valid, in which case the decoded bit pattern is written to result.
3000 */
3001 static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3002 unsigned int imms, unsigned int immr)
3003 {
3004 uint64_t mask;
3005 unsigned e, levels, s, r;
3006 int len;
3007
3008 assert(immn < 2 && imms < 64 && immr < 64);
3009
3010 /* The bit patterns we create here are 64 bit patterns which
3011 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3012 * 64 bits each. Each element contains the same value: a run
3013 * of between 1 and e-1 non-zero bits, rotated within the
3014 * element by between 0 and e-1 bits.
3015 *
3016 * The element size and run length are encoded into immn (1 bit)
3017 * and imms (6 bits) as follows:
3018 * 64 bit elements: immn = 1, imms = <length of run - 1>
3019 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3020 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3021 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3022 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3023 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3024 * Notice that immn = 0, imms = 11111x is the only combination
3025 * not covered by one of the above options; this is reserved.
3026 * Further, <length of run - 1> all-ones is a reserved pattern.
3027 *
3028 * In all cases the rotation is by immr % e (and immr is 6 bits).
3029 */
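    /* A worked example (illustrative): immn=0, imms=0b111100, immr=0
     * gives len = 1, so e = 2 and s = 0: one set bit per 2-bit element,
     * i.e. the pattern 0x5555555555555555.
     */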
3030
3031 /* First determine the element size */
3032 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3033 if (len < 1) {
3034         /* This is the immn == 0, imms == 0b11111x case */
3035 return false;
3036 }
3037 e = 1 << len;
3038
3039 levels = e - 1;
3040 s = imms & levels;
3041 r = immr & levels;
3042
3043 if (s == levels) {
3044 /* <length of run - 1> mustn't be all-ones. */
3045 return false;
3046 }
3047
3048 /* Create the value of one element: s+1 set bits rotated
3049 * by r within the element (which is e bits wide)...
3050 */
3051 mask = bitmask64(s + 1);
3052 if (r) {
3053 mask = (mask >> r) | (mask << (e - r));
3054 mask &= bitmask64(e);
3055 }
3056 /* ...then replicate the element over the whole 64 bit value */
3057 mask = bitfield_replicate(mask, e);
3058 *result = mask;
3059 return true;
3060 }
3061
3062 /* C3.4.4 Logical (immediate)
3063 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3064 * +----+-----+-------------+---+------+------+------+------+
3065 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
3066 * +----+-----+-------------+---+------+------+------+------+
3067 */
3068 static void disas_logic_imm(DisasContext *s, uint32_t insn)
3069 {
3070 unsigned int sf, opc, is_n, immr, imms, rn, rd;
3071 TCGv_i64 tcg_rd, tcg_rn;
3072 uint64_t wmask;
3073 bool is_and = false;
3074
3075 sf = extract32(insn, 31, 1);
3076 opc = extract32(insn, 29, 2);
3077 is_n = extract32(insn, 22, 1);
3078 immr = extract32(insn, 16, 6);
3079 imms = extract32(insn, 10, 6);
3080 rn = extract32(insn, 5, 5);
3081 rd = extract32(insn, 0, 5);
3082
3083 if (!sf && is_n) {
3084 unallocated_encoding(s);
3085 return;
3086 }
3087
3088 if (opc == 0x3) { /* ANDS */
3089 tcg_rd = cpu_reg(s, rd);
3090 } else {
3091 tcg_rd = cpu_reg_sp(s, rd);
3092 }
3093 tcg_rn = cpu_reg(s, rn);
3094
3095 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3096 /* some immediate field values are reserved */
3097 unallocated_encoding(s);
3098 return;
3099 }
3100
3101 if (!sf) {
3102 wmask &= 0xffffffff;
3103 }
3104
3105 switch (opc) {
3106 case 0x3: /* ANDS */
3107 case 0x0: /* AND */
3108 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3109 is_and = true;
3110 break;
3111 case 0x1: /* ORR */
3112 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3113 break;
3114 case 0x2: /* EOR */
3115 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3116 break;
3117 default:
3118         g_assert_not_reached(); /* all cases are handled above */
3119 break;
3120 }
3121
3122 if (!sf && !is_and) {
3123 /* zero extend final result; we know we can skip this for AND
3124 * since the immediate had the high 32 bits clear.
3125 */
3126 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3127 }
3128
3129 if (opc == 3) { /* ANDS */
3130 gen_logic_CC(sf, tcg_rd);
3131 }
3132 }
3133
3134 /*
3135 * C3.4.5 Move wide (immediate)
3136 *
3137 * 31 30 29 28 23 22 21 20 5 4 0
3138 * +--+-----+-------------+-----+----------------+------+
3139 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
3140 * +--+-----+-------------+-----+----------------+------+
3141 *
3142 * sf: 0 -> 32 bit, 1 -> 64 bit
3143  * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
3144 * hw: shift/16 (0,16, and sf only 32, 48)
3145 */
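/* For example (illustrative): MOVZ Xd, #0x1234, LSL #16 (opc=10, hw=01)
 * writes 0x12340000; MOVK (opc=11) deposits its 16 bits at 'pos' and
 * leaves the rest of Rd unchanged.
 */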
3146 static void disas_movw_imm(DisasContext *s, uint32_t insn)
3147 {
3148 int rd = extract32(insn, 0, 5);
3149 uint64_t imm = extract32(insn, 5, 16);
3150 int sf = extract32(insn, 31, 1);
3151 int opc = extract32(insn, 29, 2);
3152 int pos = extract32(insn, 21, 2) << 4;
3153 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3154 TCGv_i64 tcg_imm;
3155
3156 if (!sf && (pos >= 32)) {
3157 unallocated_encoding(s);
3158 return;
3159 }
3160
3161 switch (opc) {
3162 case 0: /* MOVN */
3163 case 2: /* MOVZ */
3164 imm <<= pos;
3165 if (opc == 0) {
3166 imm = ~imm;
3167 }
3168 if (!sf) {
3169 imm &= 0xffffffffu;
3170 }
3171 tcg_gen_movi_i64(tcg_rd, imm);
3172 break;
3173 case 3: /* MOVK */
3174 tcg_imm = tcg_const_i64(imm);
3175 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
3176 tcg_temp_free_i64(tcg_imm);
3177 if (!sf) {
3178 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3179 }
3180 break;
3181 default:
3182 unallocated_encoding(s);
3183 break;
3184 }
3185 }
3186
3187 /* C3.4.2 Bitfield
3188 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3189 * +----+-----+-------------+---+------+------+------+------+
3190 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
3191 * +----+-----+-------------+---+------+------+------+------+
3192 */
3193 static void disas_bitfield(DisasContext *s, uint32_t insn)
3194 {
3195 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
3196 TCGv_i64 tcg_rd, tcg_tmp;
3197
3198 sf = extract32(insn, 31, 1);
3199 opc = extract32(insn, 29, 2);
3200 n = extract32(insn, 22, 1);
3201 ri = extract32(insn, 16, 6);
3202 si = extract32(insn, 10, 6);
3203 rn = extract32(insn, 5, 5);
3204 rd = extract32(insn, 0, 5);
3205 bitsize = sf ? 64 : 32;
3206
3207 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
3208 unallocated_encoding(s);
3209 return;
3210 }
3211
3212 tcg_rd = cpu_reg(s, rd);
3213
3214 /* Suppress the zero-extend for !sf. Since RI and SI are constrained
3215 to be smaller than bitsize, we'll never reference data outside the
3216 low 32-bits anyway. */
3217 tcg_tmp = read_cpu_reg(s, rn, 1);
3218
3219 /* Recognize the common aliases. */
3220 if (opc == 0) { /* SBFM */
3221 if (ri == 0) {
3222 if (si