4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
29 #include "qemu/bitops.h"
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
39 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
40 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
41 /* currently all emulated v5 cores are also v5TE, so don't bother */
42 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
43 #define ENABLE_ARCH_5J 0
44 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
45 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
46 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
47 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
48 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
50 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
52 #include "translate.h"
54 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) (s->user)
61 /* We reuse the same 64-bit temporaries for efficiency. */
62 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
63 static TCGv_i32 cpu_R
[16];
64 TCGv_i32 cpu_CF
, cpu_NF
, cpu_VF
, cpu_ZF
;
65 TCGv_i64 cpu_exclusive_addr
;
66 TCGv_i64 cpu_exclusive_val
;
67 #ifdef CONFIG_USER_ONLY
68 TCGv_i64 cpu_exclusive_test
;
69 TCGv_i32 cpu_exclusive_info
;
72 /* FIXME: These should be removed. */
73 static TCGv_i32 cpu_F0s
, cpu_F1s
;
74 static TCGv_i64 cpu_F0d
, cpu_F1d
;
76 #include "exec/gen-icount.h"
78 static const char *regnames
[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
82 /* initialize TCG globals. */
83 void arm_translate_init(void)
87 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
89 for (i
= 0; i
< 16; i
++) {
90 cpu_R
[i
] = tcg_global_mem_new_i32(cpu_env
,
91 offsetof(CPUARMState
, regs
[i
]),
94 cpu_CF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, CF
), "CF");
95 cpu_NF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, NF
), "NF");
96 cpu_VF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, VF
), "VF");
97 cpu_ZF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, ZF
), "ZF");
99 cpu_exclusive_addr
= tcg_global_mem_new_i64(cpu_env
,
100 offsetof(CPUARMState
, exclusive_addr
), "exclusive_addr");
101 cpu_exclusive_val
= tcg_global_mem_new_i64(cpu_env
,
102 offsetof(CPUARMState
, exclusive_val
), "exclusive_val");
103 #ifdef CONFIG_USER_ONLY
104 cpu_exclusive_test
= tcg_global_mem_new_i64(cpu_env
,
105 offsetof(CPUARMState
, exclusive_test
), "exclusive_test");
106 cpu_exclusive_info
= tcg_global_mem_new_i32(cpu_env
,
107 offsetof(CPUARMState
, exclusive_info
), "exclusive_info");
110 a64_translate_init();
113 static inline ARMMMUIdx
get_a32_user_mem_index(DisasContext
*s
)
115 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
117 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
118 * otherwise, access as if at PL0.
120 switch (s
->mmu_idx
) {
121 case ARMMMUIdx_S1E2
: /* this one is UNPREDICTABLE */
122 case ARMMMUIdx_S12NSE0
:
123 case ARMMMUIdx_S12NSE1
:
124 return ARMMMUIdx_S12NSE0
;
126 case ARMMMUIdx_S1SE0
:
127 case ARMMMUIdx_S1SE1
:
128 return ARMMMUIdx_S1SE0
;
131 g_assert_not_reached();
135 static inline TCGv_i32
load_cpu_offset(int offset
)
137 TCGv_i32 tmp
= tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
144 static inline void store_cpu_offset(TCGv_i32 var
, int offset
)
146 tcg_gen_st_i32(var
, cpu_env
, offset
);
147 tcg_temp_free_i32(var
);
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUARMState, name))
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext
*s
, TCGv_i32 var
, int reg
)
158 /* normally, since we updated PC, we need only to add one insn */
160 addr
= (long)s
->pc
+ 2;
162 addr
= (long)s
->pc
+ 4;
163 tcg_gen_movi_i32(var
, addr
);
165 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv_i32
load_reg(DisasContext
*s
, int reg
)
172 TCGv_i32 tmp
= tcg_temp_new_i32();
173 load_reg_var(s
, tmp
, reg
);
177 /* Set a CPU register. The source must be a temporary and will be
179 static void store_reg(DisasContext
*s
, int reg
, TCGv_i32 var
)
182 tcg_gen_andi_i32(var
, var
, ~1);
183 s
->is_jmp
= DISAS_JUMP
;
185 tcg_gen_mov_i32(cpu_R
[reg
], var
);
186 tcg_temp_free_i32(var
);
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
199 static inline void gen_set_cpsr(TCGv_i32 var
, uint32_t mask
)
201 TCGv_i32 tmp_mask
= tcg_const_i32(mask
);
202 gen_helper_cpsr_write(cpu_env
, var
, tmp_mask
);
203 tcg_temp_free_i32(tmp_mask
);
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
208 static void gen_exception_internal(int excp
)
210 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
212 assert(excp_is_internal(excp
));
213 gen_helper_exception_internal(cpu_env
, tcg_excp
);
214 tcg_temp_free_i32(tcg_excp
);
217 static void gen_exception(int excp
, uint32_t syndrome
, uint32_t target_el
)
219 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
220 TCGv_i32 tcg_syn
= tcg_const_i32(syndrome
);
221 TCGv_i32 tcg_el
= tcg_const_i32(target_el
);
223 gen_helper_exception_with_syndrome(cpu_env
, tcg_excp
,
226 tcg_temp_free_i32(tcg_el
);
227 tcg_temp_free_i32(tcg_syn
);
228 tcg_temp_free_i32(tcg_excp
);
231 static void gen_ss_advance(DisasContext
*s
)
233 /* If the singlestep state is Active-not-pending, advance to
238 gen_helper_clear_pstate_ss(cpu_env
);
242 static void gen_step_complete_exception(DisasContext
*s
)
244 /* We just completed step of an insn. Move from Active-not-pending
245 * to Active-pending, and then also take the swstep exception.
246 * This corresponds to making the (IMPDEF) choice to prioritize
247 * swstep exceptions over asynchronous exceptions taken to an exception
248 * level where debug is disabled. This choice has the advantage that
249 * we do not need to maintain internal state corresponding to the
250 * ISV/EX syndrome bits between completion of the step and generation
251 * of the exception, and our syndrome information is always correct.
254 gen_exception(EXCP_UDEF
, syn_swstep(s
->ss_same_el
, 1, s
->is_ldex
),
255 default_exception_el(s
));
256 s
->is_jmp
= DISAS_EXC
;
259 static void gen_smul_dual(TCGv_i32 a
, TCGv_i32 b
)
261 TCGv_i32 tmp1
= tcg_temp_new_i32();
262 TCGv_i32 tmp2
= tcg_temp_new_i32();
263 tcg_gen_ext16s_i32(tmp1
, a
);
264 tcg_gen_ext16s_i32(tmp2
, b
);
265 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
266 tcg_temp_free_i32(tmp2
);
267 tcg_gen_sari_i32(a
, a
, 16);
268 tcg_gen_sari_i32(b
, b
, 16);
269 tcg_gen_mul_i32(b
, b
, a
);
270 tcg_gen_mov_i32(a
, tmp1
);
271 tcg_temp_free_i32(tmp1
);
274 /* Byteswap each halfword. */
275 static void gen_rev16(TCGv_i32 var
)
277 TCGv_i32 tmp
= tcg_temp_new_i32();
278 tcg_gen_shri_i32(tmp
, var
, 8);
279 tcg_gen_andi_i32(tmp
, tmp
, 0x00ff00ff);
280 tcg_gen_shli_i32(var
, var
, 8);
281 tcg_gen_andi_i32(var
, var
, 0xff00ff00);
282 tcg_gen_or_i32(var
, var
, tmp
);
283 tcg_temp_free_i32(tmp
);
286 /* Byteswap low halfword and sign extend. */
287 static void gen_revsh(TCGv_i32 var
)
289 tcg_gen_ext16u_i32(var
, var
);
290 tcg_gen_bswap16_i32(var
, var
);
291 tcg_gen_ext16s_i32(var
, var
);
294 /* Unsigned bitfield extract. */
295 static void gen_ubfx(TCGv_i32 var
, int shift
, uint32_t mask
)
298 tcg_gen_shri_i32(var
, var
, shift
);
299 tcg_gen_andi_i32(var
, var
, mask
);
302 /* Signed bitfield extract. */
303 static void gen_sbfx(TCGv_i32 var
, int shift
, int width
)
308 tcg_gen_sari_i32(var
, var
, shift
);
309 if (shift
+ width
< 32) {
310 signbit
= 1u << (width
- 1);
311 tcg_gen_andi_i32(var
, var
, (1u << width
) - 1);
312 tcg_gen_xori_i32(var
, var
, signbit
);
313 tcg_gen_subi_i32(var
, var
, signbit
);
317 /* Return (b << 32) + a. Mark inputs as dead */
318 static TCGv_i64
gen_addq_msw(TCGv_i64 a
, TCGv_i32 b
)
320 TCGv_i64 tmp64
= tcg_temp_new_i64();
322 tcg_gen_extu_i32_i64(tmp64
, b
);
323 tcg_temp_free_i32(b
);
324 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
325 tcg_gen_add_i64(a
, tmp64
, a
);
327 tcg_temp_free_i64(tmp64
);
331 /* Return (b << 32) - a. Mark inputs as dead. */
332 static TCGv_i64
gen_subq_msw(TCGv_i64 a
, TCGv_i32 b
)
334 TCGv_i64 tmp64
= tcg_temp_new_i64();
336 tcg_gen_extu_i32_i64(tmp64
, b
);
337 tcg_temp_free_i32(b
);
338 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
339 tcg_gen_sub_i64(a
, tmp64
, a
);
341 tcg_temp_free_i64(tmp64
);
345 /* 32x32->64 multiply. Marks inputs as dead. */
346 static TCGv_i64
gen_mulu_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
348 TCGv_i32 lo
= tcg_temp_new_i32();
349 TCGv_i32 hi
= tcg_temp_new_i32();
352 tcg_gen_mulu2_i32(lo
, hi
, a
, b
);
353 tcg_temp_free_i32(a
);
354 tcg_temp_free_i32(b
);
356 ret
= tcg_temp_new_i64();
357 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
358 tcg_temp_free_i32(lo
);
359 tcg_temp_free_i32(hi
);
364 static TCGv_i64
gen_muls_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
366 TCGv_i32 lo
= tcg_temp_new_i32();
367 TCGv_i32 hi
= tcg_temp_new_i32();
370 tcg_gen_muls2_i32(lo
, hi
, a
, b
);
371 tcg_temp_free_i32(a
);
372 tcg_temp_free_i32(b
);
374 ret
= tcg_temp_new_i64();
375 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
376 tcg_temp_free_i32(lo
);
377 tcg_temp_free_i32(hi
);
382 /* Swap low and high halfwords. */
383 static void gen_swap_half(TCGv_i32 var
)
385 TCGv_i32 tmp
= tcg_temp_new_i32();
386 tcg_gen_shri_i32(tmp
, var
, 16);
387 tcg_gen_shli_i32(var
, var
, 16);
388 tcg_gen_or_i32(var
, var
, tmp
);
389 tcg_temp_free_i32(tmp
);
392 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
393 tmp = (t0 ^ t1) & 0x8000;
396 t0 = (t0 + t1) ^ tmp;
399 static void gen_add16(TCGv_i32 t0
, TCGv_i32 t1
)
401 TCGv_i32 tmp
= tcg_temp_new_i32();
402 tcg_gen_xor_i32(tmp
, t0
, t1
);
403 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
404 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
405 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
406 tcg_gen_add_i32(t0
, t0
, t1
);
407 tcg_gen_xor_i32(t0
, t0
, tmp
);
408 tcg_temp_free_i32(tmp
);
409 tcg_temp_free_i32(t1
);
412 /* Set CF to the top bit of var. */
413 static void gen_set_CF_bit31(TCGv_i32 var
)
415 tcg_gen_shri_i32(cpu_CF
, var
, 31);
418 /* Set N and Z flags from var. */
419 static inline void gen_logic_CC(TCGv_i32 var
)
421 tcg_gen_mov_i32(cpu_NF
, var
);
422 tcg_gen_mov_i32(cpu_ZF
, var
);
426 static void gen_adc(TCGv_i32 t0
, TCGv_i32 t1
)
428 tcg_gen_add_i32(t0
, t0
, t1
);
429 tcg_gen_add_i32(t0
, t0
, cpu_CF
);
432 /* dest = T0 + T1 + CF. */
433 static void gen_add_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
435 tcg_gen_add_i32(dest
, t0
, t1
);
436 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
439 /* dest = T0 - T1 + CF - 1. */
440 static void gen_sub_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
442 tcg_gen_sub_i32(dest
, t0
, t1
);
443 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
444 tcg_gen_subi_i32(dest
, dest
, 1);
447 /* dest = T0 + T1. Compute C, N, V and Z flags */
448 static void gen_add_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
450 TCGv_i32 tmp
= tcg_temp_new_i32();
451 tcg_gen_movi_i32(tmp
, 0);
452 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, t1
, tmp
);
453 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
454 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
455 tcg_gen_xor_i32(tmp
, t0
, t1
);
456 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
457 tcg_temp_free_i32(tmp
);
458 tcg_gen_mov_i32(dest
, cpu_NF
);
461 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
462 static void gen_adc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
464 TCGv_i32 tmp
= tcg_temp_new_i32();
465 if (TCG_TARGET_HAS_add2_i32
) {
466 tcg_gen_movi_i32(tmp
, 0);
467 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, cpu_CF
, tmp
);
468 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, cpu_NF
, cpu_CF
, t1
, tmp
);
470 TCGv_i64 q0
= tcg_temp_new_i64();
471 TCGv_i64 q1
= tcg_temp_new_i64();
472 tcg_gen_extu_i32_i64(q0
, t0
);
473 tcg_gen_extu_i32_i64(q1
, t1
);
474 tcg_gen_add_i64(q0
, q0
, q1
);
475 tcg_gen_extu_i32_i64(q1
, cpu_CF
);
476 tcg_gen_add_i64(q0
, q0
, q1
);
477 tcg_gen_extr_i64_i32(cpu_NF
, cpu_CF
, q0
);
478 tcg_temp_free_i64(q0
);
479 tcg_temp_free_i64(q1
);
481 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
482 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
483 tcg_gen_xor_i32(tmp
, t0
, t1
);
484 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
485 tcg_temp_free_i32(tmp
);
486 tcg_gen_mov_i32(dest
, cpu_NF
);
489 /* dest = T0 - T1. Compute C, N, V and Z flags */
490 static void gen_sub_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
493 tcg_gen_sub_i32(cpu_NF
, t0
, t1
);
494 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
495 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_CF
, t0
, t1
);
496 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
497 tmp
= tcg_temp_new_i32();
498 tcg_gen_xor_i32(tmp
, t0
, t1
);
499 tcg_gen_and_i32(cpu_VF
, cpu_VF
, tmp
);
500 tcg_temp_free_i32(tmp
);
501 tcg_gen_mov_i32(dest
, cpu_NF
);
504 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
505 static void gen_sbc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
507 TCGv_i32 tmp
= tcg_temp_new_i32();
508 tcg_gen_not_i32(tmp
, t1
);
509 gen_adc_CC(dest
, t0
, tmp
);
510 tcg_temp_free_i32(tmp
);
513 #define GEN_SHIFT(name) \
514 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
516 TCGv_i32 tmp1, tmp2, tmp3; \
517 tmp1 = tcg_temp_new_i32(); \
518 tcg_gen_andi_i32(tmp1, t1, 0xff); \
519 tmp2 = tcg_const_i32(0); \
520 tmp3 = tcg_const_i32(0x1f); \
521 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
522 tcg_temp_free_i32(tmp3); \
523 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
524 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
525 tcg_temp_free_i32(tmp2); \
526 tcg_temp_free_i32(tmp1); \
532 static void gen_sar(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
535 tmp1
= tcg_temp_new_i32();
536 tcg_gen_andi_i32(tmp1
, t1
, 0xff);
537 tmp2
= tcg_const_i32(0x1f);
538 tcg_gen_movcond_i32(TCG_COND_GTU
, tmp1
, tmp1
, tmp2
, tmp2
, tmp1
);
539 tcg_temp_free_i32(tmp2
);
540 tcg_gen_sar_i32(dest
, t0
, tmp1
);
541 tcg_temp_free_i32(tmp1
);
544 static void tcg_gen_abs_i32(TCGv_i32 dest
, TCGv_i32 src
)
546 TCGv_i32 c0
= tcg_const_i32(0);
547 TCGv_i32 tmp
= tcg_temp_new_i32();
548 tcg_gen_neg_i32(tmp
, src
);
549 tcg_gen_movcond_i32(TCG_COND_GT
, dest
, src
, c0
, src
, tmp
);
550 tcg_temp_free_i32(c0
);
551 tcg_temp_free_i32(tmp
);
554 static void shifter_out_im(TCGv_i32 var
, int shift
)
557 tcg_gen_andi_i32(cpu_CF
, var
, 1);
559 tcg_gen_shri_i32(cpu_CF
, var
, shift
);
561 tcg_gen_andi_i32(cpu_CF
, cpu_CF
, 1);
566 /* Shift by immediate. Includes special handling for shift == 0. */
567 static inline void gen_arm_shift_im(TCGv_i32 var
, int shiftop
,
568 int shift
, int flags
)
574 shifter_out_im(var
, 32 - shift
);
575 tcg_gen_shli_i32(var
, var
, shift
);
581 tcg_gen_shri_i32(cpu_CF
, var
, 31);
583 tcg_gen_movi_i32(var
, 0);
586 shifter_out_im(var
, shift
- 1);
587 tcg_gen_shri_i32(var
, var
, shift
);
594 shifter_out_im(var
, shift
- 1);
597 tcg_gen_sari_i32(var
, var
, shift
);
599 case 3: /* ROR/RRX */
602 shifter_out_im(var
, shift
- 1);
603 tcg_gen_rotri_i32(var
, var
, shift
); break;
605 TCGv_i32 tmp
= tcg_temp_new_i32();
606 tcg_gen_shli_i32(tmp
, cpu_CF
, 31);
608 shifter_out_im(var
, 0);
609 tcg_gen_shri_i32(var
, var
, 1);
610 tcg_gen_or_i32(var
, var
, tmp
);
611 tcg_temp_free_i32(tmp
);
616 static inline void gen_arm_shift_reg(TCGv_i32 var
, int shiftop
,
617 TCGv_i32 shift
, int flags
)
621 case 0: gen_helper_shl_cc(var
, cpu_env
, var
, shift
); break;
622 case 1: gen_helper_shr_cc(var
, cpu_env
, var
, shift
); break;
623 case 2: gen_helper_sar_cc(var
, cpu_env
, var
, shift
); break;
624 case 3: gen_helper_ror_cc(var
, cpu_env
, var
, shift
); break;
629 gen_shl(var
, var
, shift
);
632 gen_shr(var
, var
, shift
);
635 gen_sar(var
, var
, shift
);
637 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
638 tcg_gen_rotr_i32(var
, var
, shift
); break;
641 tcg_temp_free_i32(shift
);
644 #define PAS_OP(pfx) \
646 case 0: gen_pas_helper(glue(pfx,add16)); break; \
647 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
648 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
649 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
650 case 4: gen_pas_helper(glue(pfx,add8)); break; \
651 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
653 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
658 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
660 tmp
= tcg_temp_new_ptr();
661 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
663 tcg_temp_free_ptr(tmp
);
666 tmp
= tcg_temp_new_ptr();
667 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
669 tcg_temp_free_ptr(tmp
);
671 #undef gen_pas_helper
672 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
685 #undef gen_pas_helper
690 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
691 #define PAS_OP(pfx) \
693 case 0: gen_pas_helper(glue(pfx,add8)); break; \
694 case 1: gen_pas_helper(glue(pfx,add16)); break; \
695 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
696 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
697 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
698 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
700 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
705 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
707 tmp
= tcg_temp_new_ptr();
708 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
710 tcg_temp_free_ptr(tmp
);
713 tmp
= tcg_temp_new_ptr();
714 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
716 tcg_temp_free_ptr(tmp
);
718 #undef gen_pas_helper
719 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
732 #undef gen_pas_helper
738 * Generate a conditional based on ARM condition code cc.
739 * This is common between ARM and Aarch64 targets.
741 void arm_test_cc(DisasCompare
*cmp
, int cc
)
772 case 8: /* hi: C && !Z */
773 case 9: /* ls: !C || Z -> !(C && !Z) */
775 value
= tcg_temp_new_i32();
777 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
778 ZF is non-zero for !Z; so AND the two subexpressions. */
779 tcg_gen_neg_i32(value
, cpu_CF
);
780 tcg_gen_and_i32(value
, value
, cpu_ZF
);
783 case 10: /* ge: N == V -> N ^ V == 0 */
784 case 11: /* lt: N != V -> N ^ V != 0 */
785 /* Since we're only interested in the sign bit, == 0 is >= 0. */
787 value
= tcg_temp_new_i32();
789 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
792 case 12: /* gt: !Z && N == V */
793 case 13: /* le: Z || N != V */
795 value
= tcg_temp_new_i32();
797 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
798 * the sign bit then AND with ZF to yield the result. */
799 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
800 tcg_gen_sari_i32(value
, value
, 31);
801 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
804 case 14: /* always */
805 case 15: /* always */
806 /* Use the ALWAYS condition, which will fold early.
807 * It doesn't matter what we use for the value. */
808 cond
= TCG_COND_ALWAYS
;
813 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
818 cond
= tcg_invert_cond(cond
);
824 cmp
->value_global
= global
;
827 void arm_free_cc(DisasCompare
*cmp
)
829 if (!cmp
->value_global
) {
830 tcg_temp_free_i32(cmp
->value
);
834 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
836 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
839 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
842 arm_test_cc(&cmp
, cc
);
843 arm_jump_cc(&cmp
, label
);
847 static const uint8_t table_logic_cc
[16] = {
866 /* Set PC and Thumb state from an immediate address. */
867 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
871 s
->is_jmp
= DISAS_JUMP
;
872 if (s
->thumb
!= (addr
& 1)) {
873 tmp
= tcg_temp_new_i32();
874 tcg_gen_movi_i32(tmp
, addr
& 1);
875 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
876 tcg_temp_free_i32(tmp
);
878 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
881 /* Set PC and Thumb state from var. var is marked as dead. */
882 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
884 s
->is_jmp
= DISAS_JUMP
;
885 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
886 tcg_gen_andi_i32(var
, var
, 1);
887 store_cpu_field(var
, thumb
);
890 /* Variant of store_reg which uses branch&exchange logic when storing
891 to r15 in ARM architecture v7 and above. The source must be a temporary
892 and will be marked as dead. */
893 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
895 if (reg
== 15 && ENABLE_ARCH_7
) {
898 store_reg(s
, reg
, var
);
902 /* Variant of store_reg which uses branch&exchange logic when storing
903 * to r15 in ARM architecture v5T and above. This is used for storing
904 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
905 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
906 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
908 if (reg
== 15 && ENABLE_ARCH_5
) {
911 store_reg(s
, reg
, var
);
915 #ifdef CONFIG_USER_ONLY
916 #define IS_USER_ONLY 1
918 #define IS_USER_ONLY 0
921 /* Abstractions of "generate code to do a guest load/store for
922 * AArch32", where a vaddr is always 32 bits (and is zero
923 * extended if we're a 64 bit core) and data is also
924 * 32 bits unless specifically doing a 64 bit access.
925 * These functions work like tcg_gen_qemu_{ld,st}* except
926 * that the address argument is TCGv_i32 rather than TCGv.
928 #if TARGET_LONG_BITS == 32
930 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
931 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
932 TCGv_i32 addr, int index) \
934 TCGMemOp opc = (OPC) | s->be_data; \
935 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
936 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
937 TCGv addr_be = tcg_temp_new(); \
938 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
939 tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
940 tcg_temp_free(addr_be); \
943 tcg_gen_qemu_ld_i32(val, addr, index, opc); \
946 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
947 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
948 TCGv_i32 addr, int index) \
950 TCGMemOp opc = (OPC) | s->be_data; \
951 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
952 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
953 TCGv addr_be = tcg_temp_new(); \
954 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
955 tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
956 tcg_temp_free(addr_be); \
959 tcg_gen_qemu_st_i32(val, addr, index, opc); \
962 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
963 TCGv_i32 addr
, int index
)
965 TCGMemOp opc
= MO_Q
| s
->be_data
;
966 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
967 /* Not needed for user-mode BE32, where we use MO_BE instead. */
968 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
969 tcg_gen_rotri_i64(val
, val
, 32);
973 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
974 TCGv_i32 addr
, int index
)
976 TCGMemOp opc
= MO_Q
| s
->be_data
;
977 /* Not needed for user-mode BE32, where we use MO_BE instead. */
978 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
979 TCGv_i64 tmp
= tcg_temp_new_i64();
980 tcg_gen_rotri_i64(tmp
, val
, 32);
981 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
982 tcg_temp_free_i64(tmp
);
985 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
990 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
991 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
992 TCGv_i32 addr, int index) \
994 TCGMemOp opc = (OPC) | s->be_data; \
995 TCGv addr64 = tcg_temp_new(); \
996 tcg_gen_extu_i32_i64(addr64, addr); \
997 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
998 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
999 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1001 tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
1002 tcg_temp_free(addr64); \
1005 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
1006 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1007 TCGv_i32 addr, int index) \
1009 TCGMemOp opc = (OPC) | s->be_data; \
1010 TCGv addr64 = tcg_temp_new(); \
1011 tcg_gen_extu_i32_i64(addr64, addr); \
1012 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1013 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1014 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1016 tcg_gen_qemu_st_i32(val, addr64, index, opc); \
1017 tcg_temp_free(addr64); \
1020 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
1021 TCGv_i32 addr
, int index
)
1023 TCGMemOp opc
= MO_Q
| s
->be_data
;
1024 TCGv addr64
= tcg_temp_new();
1025 tcg_gen_extu_i32_i64(addr64
, addr
);
1026 tcg_gen_qemu_ld_i64(val
, addr64
, index
, opc
);
1028 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1029 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1030 tcg_gen_rotri_i64(val
, val
, 32);
1032 tcg_temp_free(addr64
);
1035 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1036 TCGv_i32 addr
, int index
)
1038 TCGMemOp opc
= MO_Q
| s
->be_data
;
1039 TCGv addr64
= tcg_temp_new();
1040 tcg_gen_extu_i32_i64(addr64
, addr
);
1042 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1043 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1044 TCGv tmp
= tcg_temp_new();
1045 tcg_gen_rotri_i64(tmp
, val
, 32);
1046 tcg_gen_qemu_st_i64(tmp
, addr64
, index
, opc
);
1049 tcg_gen_qemu_st_i64(val
, addr64
, index
, opc
);
1051 tcg_temp_free(addr64
);
1056 DO_GEN_LD(8s
, MO_SB
, 3)
1057 DO_GEN_LD(8u, MO_UB
, 3)
1058 DO_GEN_LD(16s
, MO_SW
, 2)
1059 DO_GEN_LD(16u, MO_UW
, 2)
1060 DO_GEN_LD(32u, MO_UL
, 0)
1061 /* 'a' variants include an alignment check */
1062 DO_GEN_LD(16ua
, MO_UW
| MO_ALIGN
, 2)
1063 DO_GEN_LD(32ua
, MO_UL
| MO_ALIGN
, 0)
1064 DO_GEN_ST(8, MO_UB
, 3)
1065 DO_GEN_ST(16, MO_UW
, 2)
1066 DO_GEN_ST(32, MO_UL
, 0)
1068 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
1070 tcg_gen_movi_i32(cpu_R
[15], val
);
1073 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1075 /* The pre HVC helper handles cases when HVC gets trapped
1076 * as an undefined insn by runtime configuration (ie before
1077 * the insn really executes).
1079 gen_set_pc_im(s
, s
->pc
- 4);
1080 gen_helper_pre_hvc(cpu_env
);
1081 /* Otherwise we will treat this as a real exception which
1082 * happens after execution of the insn. (The distinction matters
1083 * for the PC value reported to the exception handler and also
1084 * for single stepping.)
1087 gen_set_pc_im(s
, s
->pc
);
1088 s
->is_jmp
= DISAS_HVC
;
1091 static inline void gen_smc(DisasContext
*s
)
1093 /* As with HVC, we may take an exception either before or after
1094 * the insn executes.
1098 gen_set_pc_im(s
, s
->pc
- 4);
1099 tmp
= tcg_const_i32(syn_aa32_smc());
1100 gen_helper_pre_smc(cpu_env
, tmp
);
1101 tcg_temp_free_i32(tmp
);
1102 gen_set_pc_im(s
, s
->pc
);
1103 s
->is_jmp
= DISAS_SMC
;
1107 gen_set_condexec (DisasContext
*s
)
1109 if (s
->condexec_mask
) {
1110 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
1111 TCGv_i32 tmp
= tcg_temp_new_i32();
1112 tcg_gen_movi_i32(tmp
, val
);
1113 store_cpu_field(tmp
, condexec_bits
);
1117 static void gen_exception_internal_insn(DisasContext
*s
, int offset
, int excp
)
1119 gen_set_condexec(s
);
1120 gen_set_pc_im(s
, s
->pc
- offset
);
1121 gen_exception_internal(excp
);
1122 s
->is_jmp
= DISAS_JUMP
;
1125 static void gen_exception_insn(DisasContext
*s
, int offset
, int excp
,
1126 int syn
, uint32_t target_el
)
1128 gen_set_condexec(s
);
1129 gen_set_pc_im(s
, s
->pc
- offset
);
1130 gen_exception(excp
, syn
, target_el
);
1131 s
->is_jmp
= DISAS_JUMP
;
1134 /* Force a TB lookup after an instruction that changes the CPU state. */
1135 static inline void gen_lookup_tb(DisasContext
*s
)
1137 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
1138 s
->is_jmp
= DISAS_JUMP
;
1141 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
1144 int val
, rm
, shift
, shiftop
;
1147 if (!(insn
& (1 << 25))) {
1150 if (!(insn
& (1 << 23)))
1153 tcg_gen_addi_i32(var
, var
, val
);
1155 /* shift/register */
1157 shift
= (insn
>> 7) & 0x1f;
1158 shiftop
= (insn
>> 5) & 3;
1159 offset
= load_reg(s
, rm
);
1160 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
1161 if (!(insn
& (1 << 23)))
1162 tcg_gen_sub_i32(var
, var
, offset
);
1164 tcg_gen_add_i32(var
, var
, offset
);
1165 tcg_temp_free_i32(offset
);
1169 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
1170 int extra
, TCGv_i32 var
)
1175 if (insn
& (1 << 22)) {
1177 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
1178 if (!(insn
& (1 << 23)))
1182 tcg_gen_addi_i32(var
, var
, val
);
1186 tcg_gen_addi_i32(var
, var
, extra
);
1188 offset
= load_reg(s
, rm
);
1189 if (!(insn
& (1 << 23)))
1190 tcg_gen_sub_i32(var
, var
, offset
);
1192 tcg_gen_add_i32(var
, var
, offset
);
1193 tcg_temp_free_i32(offset
);
1197 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1199 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1202 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1204 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1206 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1210 #define VFP_OP2(name) \
1211 static inline void gen_vfp_##name(int dp) \
1213 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1215 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1217 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1219 tcg_temp_free_ptr(fpst); \
1229 static inline void gen_vfp_F1_mul(int dp
)
1231 /* Like gen_vfp_mul() but put result in F1 */
1232 TCGv_ptr fpst
= get_fpstatus_ptr(0);
1234 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
1236 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
1238 tcg_temp_free_ptr(fpst
);
1241 static inline void gen_vfp_F1_neg(int dp
)
1243 /* Like gen_vfp_neg() but put result in F1 */
1245 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
1247 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
1251 static inline void gen_vfp_abs(int dp
)
1254 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1256 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1259 static inline void gen_vfp_neg(int dp
)
1262 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1264 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1267 static inline void gen_vfp_sqrt(int dp
)
1270 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1272 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1275 static inline void gen_vfp_cmp(int dp
)
1278 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1280 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1283 static inline void gen_vfp_cmpe(int dp
)
1286 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1288 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1291 static inline void gen_vfp_F1_ld0(int dp
)
1294 tcg_gen_movi_i64(cpu_F1d
, 0);
1296 tcg_gen_movi_i32(cpu_F1s
, 0);
1299 #define VFP_GEN_ITOF(name) \
1300 static inline void gen_vfp_##name(int dp, int neon) \
1302 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1304 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1306 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1308 tcg_temp_free_ptr(statusptr); \
1315 #define VFP_GEN_FTOI(name) \
1316 static inline void gen_vfp_##name(int dp, int neon) \
1318 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1320 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1322 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1324 tcg_temp_free_ptr(statusptr); \
1333 #define VFP_GEN_FIX(name, round) \
1334 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1336 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1337 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1339 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1342 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1345 tcg_temp_free_i32(tmp_shift); \
1346 tcg_temp_free_ptr(statusptr); \
1348 VFP_GEN_FIX(tosh
, _round_to_zero
)
1349 VFP_GEN_FIX(tosl
, _round_to_zero
)
1350 VFP_GEN_FIX(touh
, _round_to_zero
)
1351 VFP_GEN_FIX(toul
, _round_to_zero
)
1358 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1361 gen_aa32_ld64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1363 gen_aa32_ld32u(s
, cpu_F0s
, addr
, get_mem_index(s
));
1367 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1370 gen_aa32_st64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1372 gen_aa32_st32(s
, cpu_F0s
, addr
, get_mem_index(s
));
1377 vfp_reg_offset (int dp
, int reg
)
1380 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1382 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1383 + offsetof(CPU_DoubleU
, l
.upper
);
1385 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1386 + offsetof(CPU_DoubleU
, l
.lower
);
1390 /* Return the offset of a 32-bit piece of a NEON register.
1391 zero is the least significant end of the register. */
1393 neon_reg_offset (int reg
, int n
)
1397 return vfp_reg_offset(0, sreg
);
1400 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1402 TCGv_i32 tmp
= tcg_temp_new_i32();
1403 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1407 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1409 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1410 tcg_temp_free_i32(var
);
1413 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1415 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1418 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1420 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1423 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1424 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1425 #define tcg_gen_st_f32 tcg_gen_st_i32
1426 #define tcg_gen_st_f64 tcg_gen_st_i64
1428 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1431 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1433 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1436 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1439 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1441 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1444 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1447 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1449 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1452 #define ARM_CP_RW_BIT (1 << 20)
1454 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1456 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1459 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1461 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1464 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1466 TCGv_i32 var
= tcg_temp_new_i32();
1467 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1471 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1473 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1474 tcg_temp_free_i32(var
);
1477 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1479 iwmmxt_store_reg(cpu_M0
, rn
);
1482 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1484 iwmmxt_load_reg(cpu_M0
, rn
);
1487 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1489 iwmmxt_load_reg(cpu_V1
, rn
);
1490 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1493 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1495 iwmmxt_load_reg(cpu_V1
, rn
);
1496 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1499 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1501 iwmmxt_load_reg(cpu_V1
, rn
);
1502 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1505 #define IWMMXT_OP(name) \
1506 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1508 iwmmxt_load_reg(cpu_V1, rn); \
1509 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1512 #define IWMMXT_OP_ENV(name) \
1513 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1515 iwmmxt_load_reg(cpu_V1, rn); \
1516 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1519 #define IWMMXT_OP_ENV_SIZE(name) \
1520 IWMMXT_OP_ENV(name##b) \
1521 IWMMXT_OP_ENV(name##w) \
1522 IWMMXT_OP_ENV(name##l)
1524 #define IWMMXT_OP_ENV1(name) \
1525 static inline void gen_op_iwmmxt_##name##_M0(void) \
1527 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1541 IWMMXT_OP_ENV_SIZE(unpackl
)
1542 IWMMXT_OP_ENV_SIZE(unpackh
)
1544 IWMMXT_OP_ENV1(unpacklub
)
1545 IWMMXT_OP_ENV1(unpackluw
)
1546 IWMMXT_OP_ENV1(unpacklul
)
1547 IWMMXT_OP_ENV1(unpackhub
)
1548 IWMMXT_OP_ENV1(unpackhuw
)
1549 IWMMXT_OP_ENV1(unpackhul
)
1550 IWMMXT_OP_ENV1(unpacklsb
)
1551 IWMMXT_OP_ENV1(unpacklsw
)
1552 IWMMXT_OP_ENV1(unpacklsl
)
1553 IWMMXT_OP_ENV1(unpackhsb
)
1554 IWMMXT_OP_ENV1(unpackhsw
)
1555 IWMMXT_OP_ENV1(unpackhsl
)
1557 IWMMXT_OP_ENV_SIZE(cmpeq
)
1558 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1559 IWMMXT_OP_ENV_SIZE(cmpgts
)
1561 IWMMXT_OP_ENV_SIZE(mins
)
1562 IWMMXT_OP_ENV_SIZE(minu
)
1563 IWMMXT_OP_ENV_SIZE(maxs
)
1564 IWMMXT_OP_ENV_SIZE(maxu
)
1566 IWMMXT_OP_ENV_SIZE(subn
)
1567 IWMMXT_OP_ENV_SIZE(addn
)
1568 IWMMXT_OP_ENV_SIZE(subu
)
1569 IWMMXT_OP_ENV_SIZE(addu
)
1570 IWMMXT_OP_ENV_SIZE(subs
)
1571 IWMMXT_OP_ENV_SIZE(adds
)
1573 IWMMXT_OP_ENV(avgb0
)
1574 IWMMXT_OP_ENV(avgb1
)
1575 IWMMXT_OP_ENV(avgw0
)
1576 IWMMXT_OP_ENV(avgw1
)
1578 IWMMXT_OP_ENV(packuw
)
1579 IWMMXT_OP_ENV(packul
)
1580 IWMMXT_OP_ENV(packuq
)
1581 IWMMXT_OP_ENV(packsw
)
1582 IWMMXT_OP_ENV(packsl
)
1583 IWMMXT_OP_ENV(packsq
)
1585 static void gen_op_iwmmxt_set_mup(void)
1588 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1589 tcg_gen_ori_i32(tmp
, tmp
, 2);
1590 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1593 static void gen_op_iwmmxt_set_cup(void)
1596 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1597 tcg_gen_ori_i32(tmp
, tmp
, 1);
1598 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1601 static void gen_op_iwmmxt_setpsr_nz(void)
1603 TCGv_i32 tmp
= tcg_temp_new_i32();
1604 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1605 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1608 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1610 iwmmxt_load_reg(cpu_V1
, rn
);
1611 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1612 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1615 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1622 rd
= (insn
>> 16) & 0xf;
1623 tmp
= load_reg(s
, rd
);
1625 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1626 if (insn
& (1 << 24)) {
1628 if (insn
& (1 << 23))
1629 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1631 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1632 tcg_gen_mov_i32(dest
, tmp
);
1633 if (insn
& (1 << 21))
1634 store_reg(s
, rd
, tmp
);
1636 tcg_temp_free_i32(tmp
);
1637 } else if (insn
& (1 << 21)) {
1639 tcg_gen_mov_i32(dest
, tmp
);
1640 if (insn
& (1 << 23))
1641 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1643 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1644 store_reg(s
, rd
, tmp
);
1645 } else if (!(insn
& (1 << 23)))
1650 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1652 int rd
= (insn
>> 0) & 0xf;
1655 if (insn
& (1 << 8)) {
1656 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1659 tmp
= iwmmxt_load_creg(rd
);
1662 tmp
= tcg_temp_new_i32();
1663 iwmmxt_load_reg(cpu_V0
, rd
);
1664 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1666 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1667 tcg_gen_mov_i32(dest
, tmp
);
1668 tcg_temp_free_i32(tmp
);
1672 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1673 (ie. an undefined instruction). */
1674 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1677 int rdhi
, rdlo
, rd0
, rd1
, i
;
1679 TCGv_i32 tmp
, tmp2
, tmp3
;
1681 if ((insn
& 0x0e000e00) == 0x0c000000) {
1682 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1684 rdlo
= (insn
>> 12) & 0xf;
1685 rdhi
= (insn
>> 16) & 0xf;
1686 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1687 iwmmxt_load_reg(cpu_V0
, wrd
);
1688 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1689 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1690 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1691 } else { /* TMCRR */
1692 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1693 iwmmxt_store_reg(cpu_V0
, wrd
);
1694 gen_op_iwmmxt_set_mup();
1699 wrd
= (insn
>> 12) & 0xf;
1700 addr
= tcg_temp_new_i32();
1701 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1702 tcg_temp_free_i32(addr
);
1705 if (insn
& ARM_CP_RW_BIT
) {
1706 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1707 tmp
= tcg_temp_new_i32();
1708 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1709 iwmmxt_store_creg(wrd
, tmp
);
1712 if (insn
& (1 << 8)) {
1713 if (insn
& (1 << 22)) { /* WLDRD */
1714 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1716 } else { /* WLDRW wRd */
1717 tmp
= tcg_temp_new_i32();
1718 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1721 tmp
= tcg_temp_new_i32();
1722 if (insn
& (1 << 22)) { /* WLDRH */
1723 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1724 } else { /* WLDRB */
1725 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1729 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1730 tcg_temp_free_i32(tmp
);
1732 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1735 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1736 tmp
= iwmmxt_load_creg(wrd
);
1737 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1739 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1740 tmp
= tcg_temp_new_i32();
1741 if (insn
& (1 << 8)) {
1742 if (insn
& (1 << 22)) { /* WSTRD */
1743 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1744 } else { /* WSTRW wRd */
1745 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1746 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1749 if (insn
& (1 << 22)) { /* WSTRH */
1750 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1751 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1752 } else { /* WSTRB */
1753 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1754 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1758 tcg_temp_free_i32(tmp
);
1760 tcg_temp_free_i32(addr
);
1764 if ((insn
& 0x0f000000) != 0x0e000000)
1767 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1768 case 0x000: /* WOR */
1769 wrd
= (insn
>> 12) & 0xf;
1770 rd0
= (insn
>> 0) & 0xf;
1771 rd1
= (insn
>> 16) & 0xf;
1772 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1773 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1774 gen_op_iwmmxt_setpsr_nz();
1775 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1776 gen_op_iwmmxt_set_mup();
1777 gen_op_iwmmxt_set_cup();
1779 case 0x011: /* TMCR */
1782 rd
= (insn
>> 12) & 0xf;
1783 wrd
= (insn
>> 16) & 0xf;
1785 case ARM_IWMMXT_wCID
:
1786 case ARM_IWMMXT_wCASF
:
1788 case ARM_IWMMXT_wCon
:
1789 gen_op_iwmmxt_set_cup();
1791 case ARM_IWMMXT_wCSSF
:
1792 tmp
= iwmmxt_load_creg(wrd
);
1793 tmp2
= load_reg(s
, rd
);
1794 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1795 tcg_temp_free_i32(tmp2
);
1796 iwmmxt_store_creg(wrd
, tmp
);
1798 case ARM_IWMMXT_wCGR0
:
1799 case ARM_IWMMXT_wCGR1
:
1800 case ARM_IWMMXT_wCGR2
:
1801 case ARM_IWMMXT_wCGR3
:
1802 gen_op_iwmmxt_set_cup();
1803 tmp
= load_reg(s
, rd
);
1804 iwmmxt_store_creg(wrd
, tmp
);
1810 case 0x100: /* WXOR */
1811 wrd
= (insn
>> 12) & 0xf;
1812 rd0
= (insn
>> 0) & 0xf;
1813 rd1
= (insn
>> 16) & 0xf;
1814 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1815 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1816 gen_op_iwmmxt_setpsr_nz();
1817 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1818 gen_op_iwmmxt_set_mup();
1819 gen_op_iwmmxt_set_cup();
1821 case 0x111: /* TMRC */
1824 rd
= (insn
>> 12) & 0xf;
1825 wrd
= (insn
>> 16) & 0xf;
1826 tmp
= iwmmxt_load_creg(wrd
);
1827 store_reg(s
, rd
, tmp
);
1829 case 0x300: /* WANDN */
1830 wrd
= (insn
>> 12) & 0xf;
1831 rd0
= (insn
>> 0) & 0xf;
1832 rd1
= (insn
>> 16) & 0xf;
1833 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1834 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1835 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1836 gen_op_iwmmxt_setpsr_nz();
1837 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1838 gen_op_iwmmxt_set_mup();
1839 gen_op_iwmmxt_set_cup();
1841 case 0x200: /* WAND */
1842 wrd
= (insn
>> 12) & 0xf;
1843 rd0
= (insn
>> 0) & 0xf;
1844 rd1
= (insn
>> 16) & 0xf;
1845 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1846 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1847 gen_op_iwmmxt_setpsr_nz();
1848 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1849 gen_op_iwmmxt_set_mup();
1850 gen_op_iwmmxt_set_cup();
1852 case 0x810: case 0xa10: /* WMADD */
1853 wrd
= (insn
>> 12) & 0xf;
1854 rd0
= (insn
>> 0) & 0xf;
1855 rd1
= (insn
>> 16) & 0xf;
1856 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1857 if (insn
& (1 << 21))
1858 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1860 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1861 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1862 gen_op_iwmmxt_set_mup();
1864 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1865 wrd
= (insn
>> 12) & 0xf;
1866 rd0
= (insn
>> 16) & 0xf;
1867 rd1
= (insn
>> 0) & 0xf;
1868 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1869 switch ((insn
>> 22) & 3) {
1871 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1874 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1877 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1882 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1883 gen_op_iwmmxt_set_mup();
1884 gen_op_iwmmxt_set_cup();
1886 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1887 wrd
= (insn
>> 12) & 0xf;
1888 rd0
= (insn
>> 16) & 0xf;
1889 rd1
= (insn
>> 0) & 0xf;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1891 switch ((insn
>> 22) & 3) {
1893 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1896 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1899 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1904 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1905 gen_op_iwmmxt_set_mup();
1906 gen_op_iwmmxt_set_cup();
1908 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1909 wrd
= (insn
>> 12) & 0xf;
1910 rd0
= (insn
>> 16) & 0xf;
1911 rd1
= (insn
>> 0) & 0xf;
1912 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1913 if (insn
& (1 << 22))
1914 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1916 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1917 if (!(insn
& (1 << 20)))
1918 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1919 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1920 gen_op_iwmmxt_set_mup();
1922 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1923 wrd
= (insn
>> 12) & 0xf;
1924 rd0
= (insn
>> 16) & 0xf;
1925 rd1
= (insn
>> 0) & 0xf;
1926 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1927 if (insn
& (1 << 21)) {
1928 if (insn
& (1 << 20))
1929 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1931 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1933 if (insn
& (1 << 20))
1934 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1936 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1938 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1939 gen_op_iwmmxt_set_mup();
1941 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1942 wrd
= (insn
>> 12) & 0xf;
1943 rd0
= (insn
>> 16) & 0xf;
1944 rd1
= (insn
>> 0) & 0xf;
1945 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1946 if (insn
& (1 << 21))
1947 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1949 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1950 if (!(insn
& (1 << 20))) {
1951 iwmmxt_load_reg(cpu_V1
, wrd
);
1952 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1954 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1955 gen_op_iwmmxt_set_mup();
1957 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1958 wrd
= (insn
>> 12) & 0xf;
1959 rd0
= (insn
>> 16) & 0xf;
1960 rd1
= (insn
>> 0) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1962 switch ((insn
>> 22) & 3) {
1964 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1967 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1970 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1975 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1976 gen_op_iwmmxt_set_mup();
1977 gen_op_iwmmxt_set_cup();
1979 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1980 wrd
= (insn
>> 12) & 0xf;
1981 rd0
= (insn
>> 16) & 0xf;
1982 rd1
= (insn
>> 0) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1984 if (insn
& (1 << 22)) {
1985 if (insn
& (1 << 20))
1986 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1988 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1990 if (insn
& (1 << 20))
1991 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1993 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1995 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1999 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2000 wrd
= (insn
>> 12) & 0xf;
2001 rd0
= (insn
>> 16) & 0xf;
2002 rd1
= (insn
>> 0) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2004 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
2005 tcg_gen_andi_i32(tmp
, tmp
, 7);
2006 iwmmxt_load_reg(cpu_V1
, rd1
);
2007 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2008 tcg_temp_free_i32(tmp
);
2009 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2010 gen_op_iwmmxt_set_mup();
2012 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2013 if (((insn
>> 6) & 3) == 3)
2015 rd
= (insn
>> 12) & 0xf;
2016 wrd
= (insn
>> 16) & 0xf;
2017 tmp
= load_reg(s
, rd
);
2018 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2019 switch ((insn
>> 6) & 3) {
2021 tmp2
= tcg_const_i32(0xff);
2022 tmp3
= tcg_const_i32((insn
& 7) << 3);
2025 tmp2
= tcg_const_i32(0xffff);
2026 tmp3
= tcg_const_i32((insn
& 3) << 4);
2029 tmp2
= tcg_const_i32(0xffffffff);
2030 tmp3
= tcg_const_i32((insn
& 1) << 5);
2033 TCGV_UNUSED_I32(tmp2
);
2034 TCGV_UNUSED_I32(tmp3
);
2036 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
2037 tcg_temp_free_i32(tmp3
);
2038 tcg_temp_free_i32(tmp2
);
2039 tcg_temp_free_i32(tmp
);
2040 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2041 gen_op_iwmmxt_set_mup();
2043 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2044 rd
= (insn
>> 12) & 0xf;
2045 wrd
= (insn
>> 16) & 0xf;
2046 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2048 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2049 tmp
= tcg_temp_new_i32();
2050 switch ((insn
>> 22) & 3) {
2052 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2053 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2055 tcg_gen_ext8s_i32(tmp
, tmp
);
2057 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2061 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2062 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2064 tcg_gen_ext16s_i32(tmp
, tmp
);
2066 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2070 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2071 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2074 store_reg(s
, rd
, tmp
);
2076 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2077 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2079 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2080 switch ((insn
>> 22) & 3) {
2082 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2085 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2088 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2091 tcg_gen_shli_i32(tmp
, tmp
, 28);
2093 tcg_temp_free_i32(tmp
);
2095 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2096 if (((insn
>> 6) & 3) == 3)
2098 rd
= (insn
>> 12) & 0xf;
2099 wrd
= (insn
>> 16) & 0xf;
2100 tmp
= load_reg(s
, rd
);
2101 switch ((insn
>> 6) & 3) {
2103 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2106 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2109 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2112 tcg_temp_free_i32(tmp
);
2113 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2114 gen_op_iwmmxt_set_mup();
2116 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2117 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2119 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2120 tmp2
= tcg_temp_new_i32();
2121 tcg_gen_mov_i32(tmp2
, tmp
);
2122 switch ((insn
>> 22) & 3) {
2124 for (i
= 0; i
< 7; i
++) {
2125 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2126 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2130 for (i
= 0; i
< 3; i
++) {
2131 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2132 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2136 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2137 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2141 tcg_temp_free_i32(tmp2
);
2142 tcg_temp_free_i32(tmp
);
2144 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2145 wrd
= (insn
>> 12) & 0xf;
2146 rd0
= (insn
>> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2148 switch ((insn
>> 22) & 3) {
2150 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2153 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2156 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2161 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2162 gen_op_iwmmxt_set_mup();
2164 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2165 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2167 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2168 tmp2
= tcg_temp_new_i32();
2169 tcg_gen_mov_i32(tmp2
, tmp
);
2170 switch ((insn
>> 22) & 3) {
2172 for (i
= 0; i
< 7; i
++) {
2173 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2174 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2178 for (i
= 0; i
< 3; i
++) {
2179 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2180 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2184 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2185 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2189 tcg_temp_free_i32(tmp2
);
2190 tcg_temp_free_i32(tmp
);
2192 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2193 rd
= (insn
>> 12) & 0xf;
2194 rd0
= (insn
>> 16) & 0xf;
2195 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2197 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2198 tmp
= tcg_temp_new_i32();
2199 switch ((insn
>> 22) & 3) {
2201 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2204 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2207 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2210 store_reg(s
, rd
, tmp
);
2212 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2213 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2214 wrd
= (insn
>> 12) & 0xf;
2215 rd0
= (insn
>> 16) & 0xf;
2216 rd1
= (insn
>> 0) & 0xf;
2217 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2218 switch ((insn
>> 22) & 3) {
2220 if (insn
& (1 << 21))
2221 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2223 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2226 if (insn
& (1 << 21))
2227 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2229 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2232 if (insn
& (1 << 21))
2233 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2235 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2240 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2241 gen_op_iwmmxt_set_mup();
2242 gen_op_iwmmxt_set_cup();
2244 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2245 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2246 wrd
= (insn
>> 12) & 0xf;
2247 rd0
= (insn
>> 16) & 0xf;
2248 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2249 switch ((insn
>> 22) & 3) {
2251 if (insn
& (1 << 21))
2252 gen_op_iwmmxt_unpacklsb_M0();
2254 gen_op_iwmmxt_unpacklub_M0();
2257 if (insn
& (1 << 21))
2258 gen_op_iwmmxt_unpacklsw_M0();
2260 gen_op_iwmmxt_unpackluw_M0();
2263 if (insn
& (1 << 21))
2264 gen_op_iwmmxt_unpacklsl_M0();
2266 gen_op_iwmmxt_unpacklul_M0();
2271 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2272 gen_op_iwmmxt_set_mup();
2273 gen_op_iwmmxt_set_cup();
2275 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2276 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2277 wrd
= (insn
>> 12) & 0xf;
2278 rd0
= (insn
>> 16) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2280 switch ((insn
>> 22) & 3) {
2282 if (insn
& (1 << 21))
2283 gen_op_iwmmxt_unpackhsb_M0();
2285 gen_op_iwmmxt_unpackhub_M0();
2288 if (insn
& (1 << 21))
2289 gen_op_iwmmxt_unpackhsw_M0();
2291 gen_op_iwmmxt_unpackhuw_M0();
2294 if (insn
& (1 << 21))
2295 gen_op_iwmmxt_unpackhsl_M0();
2297 gen_op_iwmmxt_unpackhul_M0();
2302 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2303 gen_op_iwmmxt_set_mup();
2304 gen_op_iwmmxt_set_cup();
2306 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2307 case 0x214: case 0x614: case 0xa14: case 0xe14:
2308 if (((insn
>> 22) & 3) == 0)
2310 wrd
= (insn
>> 12) & 0xf;
2311 rd0
= (insn
>> 16) & 0xf;
2312 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2313 tmp
= tcg_temp_new_i32();
2314 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2315 tcg_temp_free_i32(tmp
);
2318 switch ((insn
>> 22) & 3) {
2320 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2323 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2326 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2329 tcg_temp_free_i32(tmp
);
2330 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2331 gen_op_iwmmxt_set_mup();
2332 gen_op_iwmmxt_set_cup();
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
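    /* WROR masks the rotate amount to the element width: 0xf for 16-bit,
     * 0x1f for 32-bit and 0x3f for 64-bit elements.
     */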
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
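    /* WALIGNI: extract a 64-bit result from the wRn:wRm pair, starting at
     * the byte offset encoded in insn[21:20].
     */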
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

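/* Only acc0 is implemented below; it lives in the same backing store as the
 * iwMMXt wR registers, so the MIA/MAR/MRA handling reuses the iwMMXt M0
 * temporary and load/store helpers.
 */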
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

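/* VFP register numbers are split into a 4-bit field plus one extra bit.
 * For single-precision registers the extra bit is the LOW bit of the
 * register number (S<n>: n = field * 2 + bit); for double-precision
 * registers on VFPv3 and later it is the HIGH bit (D<n>: n = bit * 16 +
 * field).  The macros below decode both forms; pre-VFPv3 cores UNDEF if
 * the high D-register bit is set.
 */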
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
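/* Replicate a narrow value across a 32-bit TCG temp: one byte four times,
 * or one halfword (taken from the low or high half) twice.  Used below for
 * the Neon "load single element and replicate" forms.
 */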
static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}

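/* VSEL: dest = cc ? Vn : Vm, where cc is one of EQ/VS/GE/GT taken from
 * insn[21:20].  Implemented with movcond on the cached NZCV flag
 * temporaries rather than with branches.
 */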
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}

static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);

    return 0;
}

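/* VRINT{A,N,P,M} and VCVT{A,N,P,M}: switch the FP rounding mode to the one
 * encoded in the instruction, perform the operation, then restore the
 * previous mode (gen_helper_set_rmode hands back the old mode).
 */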
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}

static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}

/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}

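/* The v8-only floating point encodings (VSEL, VMINNM/VMAXNM, and VRINT and
 * VCVT with explicit rounding modes) are matched above by mask/value tests
 * on the instruction word; everything else goes through the main VFP
 * decoder below.
 */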
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.