/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    arm_dc_feature(s, ARM_FEATURE_JAZELLE)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the callsites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

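/* Write var to the CPSR via the cpsr_write helper, touching only the bits
 * selected by mask.
 */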
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

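/* 16x16->32 signed dual multiply: on return, a holds the product of the
 * low halfwords of the inputs and b holds the product of the high halfwords.
 */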
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

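/* Variable shifts: the architectural shift amount is the low byte of t1.
 * Counts of 32 or more must yield zero, so movcond substitutes a zero
 * source operand first and the TCG shift count is then masked below 32.
 */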
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

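/* dest = |src|, computed by selecting between src and -src with movcond. */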
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

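/* Set CF to bit 'shift' of var (used to compute the shifter carry out). */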
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

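/* Shift by register.  If flags is set, use the flag-setting helpers so the
 * carry out is updated as well; otherwise use the plain shift generators.
 */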
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

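/* Parallel add/subtract (media) operations: dispatch on the op1/op2 encoding
 * and call the matching signed/unsigned/saturating/halving helper, passing a
 * pointer to the GE flags for the variants that need to set them.
 */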
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        break;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* tst */
    1, /* teq */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

933 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
937 s
->base
.is_jmp
= DISAS_JUMP
;
938 if (s
->thumb
!= (addr
& 1)) {
939 tmp
= tcg_temp_new_i32();
940 tcg_gen_movi_i32(tmp
, addr
& 1);
941 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
942 tcg_temp_free_i32(tmp
);
944 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
947 /* Set PC and Thumb state from var. var is marked as dead. */
948 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
950 s
->base
.is_jmp
= DISAS_JUMP
;
951 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
952 tcg_gen_andi_i32(var
, var
, 1);
953 store_cpu_field(var
, thumb
);
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

1075 /* Abstractions of "generate code to do a guest load/store for
1076 * AArch32", where a vaddr is always 32 bits (and is zero
1077 * extended if we're a 64 bit core) and data is also
1078 * 32 bits unless specifically doing a 64 bit access.
1079 * These functions work like tcg_gen_qemu_{ld,st}* except
1080 * that the address argument is TCGv_i32 rather than TCGv.
1083 static inline TCGv
gen_aa32_addr(DisasContext
*s
, TCGv_i32 a32
, TCGMemOp op
)
1085 TCGv addr
= tcg_temp_new();
1086 tcg_gen_extu_i32_tl(addr
, a32
);
1088 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1089 if (!IS_USER_ONLY
&& s
->sctlr_b
&& (op
& MO_SIZE
) < MO_32
) {
1090 tcg_gen_xori_tl(addr
, addr
, 4 - (1 << (op
& MO_SIZE
)));
1095 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1096 int index
, TCGMemOp opc
)
1098 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1099 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
1100 tcg_temp_free(addr
);
1103 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1104 int index
, TCGMemOp opc
)
1106 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1107 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
1108 tcg_temp_free(addr
);
1111 #define DO_GEN_LD(SUFF, OPC) \
1112 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1113 TCGv_i32 a32, int index) \
1115 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1117 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1119 TCGv_i32 a32, int index, \
1122 gen_aa32_ld##SUFF(s, val, a32, index); \
1123 disas_set_da_iss(s, OPC, issinfo); \
1126 #define DO_GEN_ST(SUFF, OPC) \
1127 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1128 TCGv_i32 a32, int index) \
1130 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1132 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1134 TCGv_i32 a32, int index, \
1137 gen_aa32_st##SUFF(s, val, a32, index); \
1138 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1141 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
1143 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1144 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1145 tcg_gen_rotri_i64(val
, val
, 32);
1149 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1150 int index
, TCGMemOp opc
)
1152 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1153 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
1154 gen_aa32_frob64(s
, val
);
1155 tcg_temp_free(addr
);
1158 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
1159 TCGv_i32 a32
, int index
)
1161 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1164 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1165 int index
, TCGMemOp opc
)
1167 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1169 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1170 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1171 TCGv_i64 tmp
= tcg_temp_new_i64();
1172 tcg_gen_rotri_i64(tmp
, val
, 32);
1173 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1174 tcg_temp_free_i64(tmp
);
1176 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1178 tcg_temp_free(addr
);
1181 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1182 TCGv_i32 a32
, int index
)
1184 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1187 DO_GEN_LD(8s
, MO_SB
)
1188 DO_GEN_LD(8u, MO_UB
)
1189 DO_GEN_LD(16s
, MO_SW
)
1190 DO_GEN_LD(16u, MO_UW
)
1191 DO_GEN_LD(32u, MO_UL
)
1193 DO_GEN_ST(16, MO_UW
)
1194 DO_GEN_ST(32, MO_UL
)
1196 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1198 /* The pre HVC helper handles cases when HVC gets trapped
1199 * as an undefined insn by runtime configuration (ie before
1200 * the insn really executes).
1202 gen_set_pc_im(s
, s
->pc
- 4);
1203 gen_helper_pre_hvc(cpu_env
);
1204 /* Otherwise we will treat this as a real exception which
1205 * happens after execution of the insn. (The distinction matters
1206 * for the PC value reported to the exception handler and also
1207 * for single stepping.)
1210 gen_set_pc_im(s
, s
->pc
);
1211 s
->base
.is_jmp
= DISAS_HVC
;
1214 static inline void gen_smc(DisasContext
*s
)
1216 /* As with HVC, we may take an exception either before or after
1217 * the insn executes.
1221 gen_set_pc_im(s
, s
->pc
- 4);
1222 tmp
= tcg_const_i32(syn_aa32_smc());
1223 gen_helper_pre_smc(cpu_env
, tmp
);
1224 tcg_temp_free_i32(tmp
);
1225 gen_set_pc_im(s
, s
->pc
);
1226 s
->base
.is_jmp
= DISAS_SMC
;
1229 static void gen_exception_internal_insn(DisasContext
*s
, int offset
, int excp
)
1231 gen_set_condexec(s
);
1232 gen_set_pc_im(s
, s
->pc
- offset
);
1233 gen_exception_internal(excp
);
1234 s
->base
.is_jmp
= DISAS_NORETURN
;
1237 static void gen_exception_insn(DisasContext
*s
, int offset
, int excp
,
1238 int syn
, uint32_t target_el
)
1240 gen_set_condexec(s
);
1241 gen_set_pc_im(s
, s
->pc
- offset
);
1242 gen_exception(excp
, syn
, target_el
);
1243 s
->base
.is_jmp
= DISAS_NORETURN
;
1246 /* Force a TB lookup after an instruction that changes the CPU state. */
1247 static inline void gen_lookup_tb(DisasContext
*s
)
1249 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
1250 s
->base
.is_jmp
= DISAS_EXIT
;
1253 static inline void gen_hlt(DisasContext
*s
, int imm
)
1255 /* HLT. This has two purposes.
1256 * Architecturally, it is an external halting debug instruction.
1257 * Since QEMU doesn't implement external debug, we treat this as
1258 * it is required for halting debug disabled: it will UNDEF.
1259 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1260 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1261 * must trigger semihosting even for ARMv7 and earlier, where
1262 * HLT was an undefined encoding.
1263 * In system mode, we don't allow userspace access to
1264 * semihosting, to provide some semblance of security
1265 * (and for consistency with our 32-bit semihosting).
1267 if (semihosting_enabled() &&
1268 #ifndef CONFIG_USER_ONLY
1269 s
->current_el
!= 0 &&
1271 (imm
== (s
->thumb ?
0x3c : 0xf000))) {
1272 gen_exception_internal_insn(s
, 0, EXCP_SEMIHOST
);
1276 gen_exception_insn(s
, s
->thumb ?
2 : 4, EXCP_UDEF
, syn_uncategorized(),
1277 default_exception_el(s
));
1280 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
1283 int val
, rm
, shift
, shiftop
;
1286 if (!(insn
& (1 << 25))) {
1289 if (!(insn
& (1 << 23)))
1292 tcg_gen_addi_i32(var
, var
, val
);
1294 /* shift/register */
1296 shift
= (insn
>> 7) & 0x1f;
1297 shiftop
= (insn
>> 5) & 3;
1298 offset
= load_reg(s
, rm
);
1299 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
1300 if (!(insn
& (1 << 23)))
1301 tcg_gen_sub_i32(var
, var
, offset
);
1303 tcg_gen_add_i32(var
, var
, offset
);
1304 tcg_temp_free_i32(offset
);
1308 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
1309 int extra
, TCGv_i32 var
)
1314 if (insn
& (1 << 22)) {
1316 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
1317 if (!(insn
& (1 << 23)))
1321 tcg_gen_addi_i32(var
, var
, val
);
1325 tcg_gen_addi_i32(var
, var
, extra
);
1327 offset
= load_reg(s
, rm
);
1328 if (!(insn
& (1 << 23)))
1329 tcg_gen_sub_i32(var
, var
, offset
);
1331 tcg_gen_add_i32(var
, var
, offset
);
1332 tcg_temp_free_i32(offset
);
1336 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1338 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1341 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1343 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1345 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1349 #define VFP_OP2(name) \
1350 static inline void gen_vfp_##name(int dp) \
1352 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1354 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1356 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1358 tcg_temp_free_ptr(fpst); \
1368 static inline void gen_vfp_F1_mul(int dp
)
1370 /* Like gen_vfp_mul() but put result in F1 */
1371 TCGv_ptr fpst
= get_fpstatus_ptr(0);
1373 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
1375 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
1377 tcg_temp_free_ptr(fpst
);
1380 static inline void gen_vfp_F1_neg(int dp
)
1382 /* Like gen_vfp_neg() but put result in F1 */
1384 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
1386 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
1390 static inline void gen_vfp_abs(int dp
)
1393 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1395 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1398 static inline void gen_vfp_neg(int dp
)
1401 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1403 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1406 static inline void gen_vfp_sqrt(int dp
)
1409 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1411 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1414 static inline void gen_vfp_cmp(int dp
)
1417 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1419 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1422 static inline void gen_vfp_cmpe(int dp
)
1425 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1427 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1430 static inline void gen_vfp_F1_ld0(int dp
)
1433 tcg_gen_movi_i64(cpu_F1d
, 0);
1435 tcg_gen_movi_i32(cpu_F1s
, 0);
1438 #define VFP_GEN_ITOF(name) \
1439 static inline void gen_vfp_##name(int dp, int neon) \
1441 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1443 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1445 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1447 tcg_temp_free_ptr(statusptr); \
1454 #define VFP_GEN_FTOI(name) \
1455 static inline void gen_vfp_##name(int dp, int neon) \
1457 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1459 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1461 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1463 tcg_temp_free_ptr(statusptr); \
1472 #define VFP_GEN_FIX(name, round) \
1473 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1475 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1476 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1478 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1481 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1484 tcg_temp_free_i32(tmp_shift); \
1485 tcg_temp_free_ptr(statusptr); \
1487 VFP_GEN_FIX(tosh
, _round_to_zero
)
1488 VFP_GEN_FIX(tosl
, _round_to_zero
)
1489 VFP_GEN_FIX(touh
, _round_to_zero
)
1490 VFP_GEN_FIX(toul
, _round_to_zero
)
1497 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1500 gen_aa32_ld64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1502 gen_aa32_ld32u(s
, cpu_F0s
, addr
, get_mem_index(s
));
1506 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1509 gen_aa32_st64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1511 gen_aa32_st32(s
, cpu_F0s
, addr
, get_mem_index(s
));
1516 vfp_reg_offset (int dp
, int reg
)
1519 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1521 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1522 + offsetof(CPU_DoubleU
, l
.upper
);
1524 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1525 + offsetof(CPU_DoubleU
, l
.lower
);
1529 /* Return the offset of a 32-bit piece of a NEON register.
1530 zero is the least significant end of the register. */
1532 neon_reg_offset (int reg
, int n
)
1536 return vfp_reg_offset(0, sreg
);
1539 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1541 TCGv_i32 tmp
= tcg_temp_new_i32();
1542 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1546 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1548 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1549 tcg_temp_free_i32(var
);
1552 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1554 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1557 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1559 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1562 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1563 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1564 #define tcg_gen_st_f32 tcg_gen_st_i32
1565 #define tcg_gen_st_f64 tcg_gen_st_i64
1567 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1570 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1572 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1575 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1578 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1580 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1583 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1586 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1588 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1591 #define ARM_CP_RW_BIT (1 << 20)
1593 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1595 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1598 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1600 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1603 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1605 TCGv_i32 var
= tcg_temp_new_i32();
1606 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1610 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1612 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1613 tcg_temp_free_i32(var
);
1616 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1618 iwmmxt_store_reg(cpu_M0
, rn
);
1621 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1623 iwmmxt_load_reg(cpu_M0
, rn
);
1626 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1628 iwmmxt_load_reg(cpu_V1
, rn
);
1629 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1632 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1634 iwmmxt_load_reg(cpu_V1
, rn
);
1635 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1638 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1640 iwmmxt_load_reg(cpu_V1
, rn
);
1641 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1644 #define IWMMXT_OP(name) \
1645 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1647 iwmmxt_load_reg(cpu_V1, rn); \
1648 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1651 #define IWMMXT_OP_ENV(name) \
1652 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1654 iwmmxt_load_reg(cpu_V1, rn); \
1655 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1658 #define IWMMXT_OP_ENV_SIZE(name) \
1659 IWMMXT_OP_ENV(name##b) \
1660 IWMMXT_OP_ENV(name##w) \
1661 IWMMXT_OP_ENV(name##l)
1663 #define IWMMXT_OP_ENV1(name) \
1664 static inline void gen_op_iwmmxt_##name##_M0(void) \
1666 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1680 IWMMXT_OP_ENV_SIZE(unpackl
)
1681 IWMMXT_OP_ENV_SIZE(unpackh
)
1683 IWMMXT_OP_ENV1(unpacklub
)
1684 IWMMXT_OP_ENV1(unpackluw
)
1685 IWMMXT_OP_ENV1(unpacklul
)
1686 IWMMXT_OP_ENV1(unpackhub
)
1687 IWMMXT_OP_ENV1(unpackhuw
)
1688 IWMMXT_OP_ENV1(unpackhul
)
1689 IWMMXT_OP_ENV1(unpacklsb
)
1690 IWMMXT_OP_ENV1(unpacklsw
)
1691 IWMMXT_OP_ENV1(unpacklsl
)
1692 IWMMXT_OP_ENV1(unpackhsb
)
1693 IWMMXT_OP_ENV1(unpackhsw
)
1694 IWMMXT_OP_ENV1(unpackhsl
)
1696 IWMMXT_OP_ENV_SIZE(cmpeq
)
1697 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1698 IWMMXT_OP_ENV_SIZE(cmpgts
)
1700 IWMMXT_OP_ENV_SIZE(mins
)
1701 IWMMXT_OP_ENV_SIZE(minu
)
1702 IWMMXT_OP_ENV_SIZE(maxs
)
1703 IWMMXT_OP_ENV_SIZE(maxu
)
1705 IWMMXT_OP_ENV_SIZE(subn
)
1706 IWMMXT_OP_ENV_SIZE(addn
)
1707 IWMMXT_OP_ENV_SIZE(subu
)
1708 IWMMXT_OP_ENV_SIZE(addu
)
1709 IWMMXT_OP_ENV_SIZE(subs
)
1710 IWMMXT_OP_ENV_SIZE(adds
)
1712 IWMMXT_OP_ENV(avgb0
)
1713 IWMMXT_OP_ENV(avgb1
)
1714 IWMMXT_OP_ENV(avgw0
)
1715 IWMMXT_OP_ENV(avgw1
)
1717 IWMMXT_OP_ENV(packuw
)
1718 IWMMXT_OP_ENV(packul
)
1719 IWMMXT_OP_ENV(packuq
)
1720 IWMMXT_OP_ENV(packsw
)
1721 IWMMXT_OP_ENV(packsl
)
1722 IWMMXT_OP_ENV(packsq
)
1724 static void gen_op_iwmmxt_set_mup(void)
1727 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1728 tcg_gen_ori_i32(tmp
, tmp
, 2);
1729 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1732 static void gen_op_iwmmxt_set_cup(void)
1735 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1736 tcg_gen_ori_i32(tmp
, tmp
, 1);
1737 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1740 static void gen_op_iwmmxt_setpsr_nz(void)
1742 TCGv_i32 tmp
= tcg_temp_new_i32();
1743 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1744 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1747 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1749 iwmmxt_load_reg(cpu_V1
, rn
);
1750 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1751 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1754 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1761 rd
= (insn
>> 16) & 0xf;
1762 tmp
= load_reg(s
, rd
);
1764 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1765 if (insn
& (1 << 24)) {
1767 if (insn
& (1 << 23))
1768 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1770 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1771 tcg_gen_mov_i32(dest
, tmp
);
1772 if (insn
& (1 << 21))
1773 store_reg(s
, rd
, tmp
);
1775 tcg_temp_free_i32(tmp
);
1776 } else if (insn
& (1 << 21)) {
1778 tcg_gen_mov_i32(dest
, tmp
);
1779 if (insn
& (1 << 23))
1780 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1782 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1783 store_reg(s
, rd
, tmp
);
1784 } else if (!(insn
& (1 << 23)))
1789 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1791 int rd
= (insn
>> 0) & 0xf;
1794 if (insn
& (1 << 8)) {
1795 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1798 tmp
= iwmmxt_load_creg(rd
);
1801 tmp
= tcg_temp_new_i32();
1802 iwmmxt_load_reg(cpu_V0
, rd
);
1803 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1805 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1806 tcg_gen_mov_i32(dest
, tmp
);
1807 tcg_temp_free_i32(tmp
);
1811 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1812 (ie. an undefined instruction). */
1813 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1816 int rdhi
, rdlo
, rd0
, rd1
, i
;
1818 TCGv_i32 tmp
, tmp2
, tmp3
;
1820 if ((insn
& 0x0e000e00) == 0x0c000000) {
1821 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1823 rdlo
= (insn
>> 12) & 0xf;
1824 rdhi
= (insn
>> 16) & 0xf;
1825 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1826 iwmmxt_load_reg(cpu_V0
, wrd
);
1827 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1828 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1829 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1830 } else { /* TMCRR */
1831 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1832 iwmmxt_store_reg(cpu_V0
, wrd
);
1833 gen_op_iwmmxt_set_mup();
1838 wrd
= (insn
>> 12) & 0xf;
1839 addr
= tcg_temp_new_i32();
1840 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1841 tcg_temp_free_i32(addr
);
1844 if (insn
& ARM_CP_RW_BIT
) {
1845 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1846 tmp
= tcg_temp_new_i32();
1847 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1848 iwmmxt_store_creg(wrd
, tmp
);
1851 if (insn
& (1 << 8)) {
1852 if (insn
& (1 << 22)) { /* WLDRD */
1853 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1855 } else { /* WLDRW wRd */
1856 tmp
= tcg_temp_new_i32();
1857 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1860 tmp
= tcg_temp_new_i32();
1861 if (insn
& (1 << 22)) { /* WLDRH */
1862 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1863 } else { /* WLDRB */
1864 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1868 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1869 tcg_temp_free_i32(tmp
);
1871 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1874 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1875 tmp
= iwmmxt_load_creg(wrd
);
1876 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1878 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1879 tmp
= tcg_temp_new_i32();
1880 if (insn
& (1 << 8)) {
1881 if (insn
& (1 << 22)) { /* WSTRD */
1882 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1883 } else { /* WSTRW wRd */
1884 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1885 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1888 if (insn
& (1 << 22)) { /* WSTRH */
1889 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1890 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1891 } else { /* WSTRB */
1892 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1893 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1897 tcg_temp_free_i32(tmp
);
1899 tcg_temp_free_i32(addr
);
1903 if ((insn
& 0x0f000000) != 0x0e000000)
1906 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1907 case 0x000: /* WOR */
1908 wrd
= (insn
>> 12) & 0xf;
1909 rd0
= (insn
>> 0) & 0xf;
1910 rd1
= (insn
>> 16) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1912 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1913 gen_op_iwmmxt_setpsr_nz();
1914 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1915 gen_op_iwmmxt_set_mup();
1916 gen_op_iwmmxt_set_cup();
1918 case 0x011: /* TMCR */
1921 rd
= (insn
>> 12) & 0xf;
1922 wrd
= (insn
>> 16) & 0xf;
1924 case ARM_IWMMXT_wCID
:
1925 case ARM_IWMMXT_wCASF
:
1927 case ARM_IWMMXT_wCon
:
1928 gen_op_iwmmxt_set_cup();
1930 case ARM_IWMMXT_wCSSF
:
1931 tmp
= iwmmxt_load_creg(wrd
);
1932 tmp2
= load_reg(s
, rd
);
1933 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1934 tcg_temp_free_i32(tmp2
);
1935 iwmmxt_store_creg(wrd
, tmp
);
1937 case ARM_IWMMXT_wCGR0
:
1938 case ARM_IWMMXT_wCGR1
:
1939 case ARM_IWMMXT_wCGR2
:
1940 case ARM_IWMMXT_wCGR3
:
1941 gen_op_iwmmxt_set_cup();
1942 tmp
= load_reg(s
, rd
);
1943 iwmmxt_store_creg(wrd
, tmp
);
1949 case 0x100: /* WXOR */
1950 wrd
= (insn
>> 12) & 0xf;
1951 rd0
= (insn
>> 0) & 0xf;
1952 rd1
= (insn
>> 16) & 0xf;
1953 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1954 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1955 gen_op_iwmmxt_setpsr_nz();
1956 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1957 gen_op_iwmmxt_set_mup();
1958 gen_op_iwmmxt_set_cup();
1960 case 0x111: /* TMRC */
1963 rd
= (insn
>> 12) & 0xf;
1964 wrd
= (insn
>> 16) & 0xf;
1965 tmp
= iwmmxt_load_creg(wrd
);
1966 store_reg(s
, rd
, tmp
);
1968 case 0x300: /* WANDN */
1969 wrd
= (insn
>> 12) & 0xf;
1970 rd0
= (insn
>> 0) & 0xf;
1971 rd1
= (insn
>> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1973 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1974 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1975 gen_op_iwmmxt_setpsr_nz();
1976 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1977 gen_op_iwmmxt_set_mup();
1978 gen_op_iwmmxt_set_cup();
1980 case 0x200: /* WAND */
1981 wrd
= (insn
>> 12) & 0xf;
1982 rd0
= (insn
>> 0) & 0xf;
1983 rd1
= (insn
>> 16) & 0xf;
1984 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1985 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1986 gen_op_iwmmxt_setpsr_nz();
1987 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1988 gen_op_iwmmxt_set_mup();
1989 gen_op_iwmmxt_set_cup();
1991 case 0x810: case 0xa10: /* WMADD */
1992 wrd
= (insn
>> 12) & 0xf;
1993 rd0
= (insn
>> 0) & 0xf;
1994 rd1
= (insn
>> 16) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1996 if (insn
& (1 << 21))
1997 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1999 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
2000 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2001 gen_op_iwmmxt_set_mup();
2003 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2004 wrd
= (insn
>> 12) & 0xf;
2005 rd0
= (insn
>> 16) & 0xf;
2006 rd1
= (insn
>> 0) & 0xf;
2007 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2008 switch ((insn
>> 22) & 3) {
2010 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
2013 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
2016 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
2021 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2022 gen_op_iwmmxt_set_mup();
2023 gen_op_iwmmxt_set_cup();
2025 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2026 wrd
= (insn
>> 12) & 0xf;
2027 rd0
= (insn
>> 16) & 0xf;
2028 rd1
= (insn
>> 0) & 0xf;
2029 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2030 switch ((insn
>> 22) & 3) {
2032 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
2035 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
2038 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
2043 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2047 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2048 wrd
= (insn
>> 12) & 0xf;
2049 rd0
= (insn
>> 16) & 0xf;
2050 rd1
= (insn
>> 0) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2052 if (insn
& (1 << 22))
2053 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
2055 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
2056 if (!(insn
& (1 << 20)))
2057 gen_op_iwmmxt_addl_M0_wRn(wrd
);
2058 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2059 gen_op_iwmmxt_set_mup();
2061 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2062 wrd
= (insn
>> 12) & 0xf;
2063 rd0
= (insn
>> 16) & 0xf;
2064 rd1
= (insn
>> 0) & 0xf;
2065 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2066 if (insn
& (1 << 21)) {
2067 if (insn
& (1 << 20))
2068 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
2070 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
2072 if (insn
& (1 << 20))
2073 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
2075 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
2077 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2078 gen_op_iwmmxt_set_mup();
2080 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2081 wrd
= (insn
>> 12) & 0xf;
2082 rd0
= (insn
>> 16) & 0xf;
2083 rd1
= (insn
>> 0) & 0xf;
2084 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2085 if (insn
& (1 << 21))
2086 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
2088 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
2089 if (!(insn
& (1 << 20))) {
2090 iwmmxt_load_reg(cpu_V1
, wrd
);
2091 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
2093 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2094 gen_op_iwmmxt_set_mup();
2096 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2097 wrd
= (insn
>> 12) & 0xf;
2098 rd0
= (insn
>> 16) & 0xf;
2099 rd1
= (insn
>> 0) & 0xf;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2101 switch ((insn
>> 22) & 3) {
2103 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
2106 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
2109 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
2114 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2115 gen_op_iwmmxt_set_mup();
2116 gen_op_iwmmxt_set_cup();
2118 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2119 wrd
= (insn
>> 12) & 0xf;
2120 rd0
= (insn
>> 16) & 0xf;
2121 rd1
= (insn
>> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2123 if (insn
& (1 << 22)) {
2124 if (insn
& (1 << 20))
2125 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
2127 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
2129 if (insn
& (1 << 20))
2130 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
2132 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
2134 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2135 gen_op_iwmmxt_set_mup();
2136 gen_op_iwmmxt_set_cup();
2138 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2139 wrd
= (insn
>> 12) & 0xf;
2140 rd0
= (insn
>> 16) & 0xf;
2141 rd1
= (insn
>> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2143 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
2144 tcg_gen_andi_i32(tmp
, tmp
, 7);
2145 iwmmxt_load_reg(cpu_V1
, rd1
);
2146 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2147 tcg_temp_free_i32(tmp
);
2148 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2149 gen_op_iwmmxt_set_mup();
2151 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2152 if (((insn
>> 6) & 3) == 3)
2154 rd
= (insn
>> 12) & 0xf;
2155 wrd
= (insn
>> 16) & 0xf;
2156 tmp
= load_reg(s
, rd
);
2157 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2158 switch ((insn
>> 6) & 3) {
2160 tmp2
= tcg_const_i32(0xff);
2161 tmp3
= tcg_const_i32((insn
& 7) << 3);
2164 tmp2
= tcg_const_i32(0xffff);
2165 tmp3
= tcg_const_i32((insn
& 3) << 4);
2168 tmp2
= tcg_const_i32(0xffffffff);
2169 tmp3
= tcg_const_i32((insn
& 1) << 5);
2172 TCGV_UNUSED_I32(tmp2
);
2173 TCGV_UNUSED_I32(tmp3
);
2175 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
2176 tcg_temp_free_i32(tmp3
);
2177 tcg_temp_free_i32(tmp2
);
2178 tcg_temp_free_i32(tmp
);
2179 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2180 gen_op_iwmmxt_set_mup();
2182 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2183 rd
= (insn
>> 12) & 0xf;
2184 wrd
= (insn
>> 16) & 0xf;
2185 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2187 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2188 tmp
= tcg_temp_new_i32();
2189 switch ((insn
>> 22) & 3) {
2191 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2192 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2194 tcg_gen_ext8s_i32(tmp
, tmp
);
2196 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2200 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2201 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2203 tcg_gen_ext16s_i32(tmp
, tmp
);
2205 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2209 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2210 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2213 store_reg(s
, rd
, tmp
);
2215 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2216 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2218 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2219 switch ((insn
>> 22) & 3) {
2221 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2224 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2227 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2230 tcg_gen_shli_i32(tmp
, tmp
, 28);
2232 tcg_temp_free_i32(tmp
);
2234 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2235 if (((insn
>> 6) & 3) == 3)
2237 rd
= (insn
>> 12) & 0xf;
2238 wrd
= (insn
>> 16) & 0xf;
2239 tmp
= load_reg(s
, rd
);
2240 switch ((insn
>> 6) & 3) {
2242 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2245 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2248 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2251 tcg_temp_free_i32(tmp
);
2252 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2253 gen_op_iwmmxt_set_mup();
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
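    /* TANDC folds the per-element SIMD flag fields held in wCASF into a
     * single set by repeatedly shifting the copy in tmp2 and ANDing it into
     * tmp; the result is then transferred to the CPSR NZCV flags.
     */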
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
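    /* TMOVMSK: the msbb/msbw/msbl helpers gather the most significant bit
     * of each byte/halfword/word element of the source wRn into a bitmask
     * that is written to the destination core register rd.
     */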
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
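    /* The shift-count masks passed to gen_iwmmxt_shift() above (0xf, 0x1f,
     * 0x3f) bound the rotate amount to the element width of the halfword,
     * word and doubleword forms; a nonzero return indicates a count that
     * cannot be encoded, so the insn is rejected with return 1.
     */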
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
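
/* For MRA above, the mask (1 << (40 - 32)) - 1 == 0xff keeps only bits
 * [39:32] of the 40-bit accumulator in the high destination register:
 * rdlo receives acc[31:0] and rdhi receives acc[39:32] zero-extended.
 */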
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
    ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
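
/* Worked example of the macros above, for the "D" register position:
 * VFP_SREG_D yields Sd = (insn[15:12] << 1) | insn[22], while VFP_DREG_D
 * on a core with VFP3 yields Dd = insn[15:12] | (insn[22] << 4); without
 * VFP3 the extra bit must be zero or the macro rejects the insn by
 * returning 1 from the enclosing function.
 */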
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
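
/* Example for gen_neon_dup_u8: with shift == 0 and var == 0x000000ab, the
 * sequence above leaves var == 0xabababab (the selected byte replicated
 * into all four byte lanes).
 */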

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
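
/* Example for gen_neon_dup_low16: var == 0x1234abcd becomes 0xabcdabcd
 * (the low halfword replicated into both halves).
 */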

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
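
/* Example for gen_neon_dup_high16: var == 0xabcd1234 becomes 0xabcdabcd
 * (the high halfword replicated into both halves).
 */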

static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
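
/* gen_load_and_replicate is intended for the Neon "load single element to
 * all lanes" forms: the loaded byte or halfword is replicated so the caller
 * can store the same 32-bit pattern to every word of the destination
 * registers.
 */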

static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
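
/* The two-bit cc field decoded in handle_vsel above selects which flag test
 * picks frn over frm: 0 behaves like EQ (Z set), 1 like VS (V set), 2 like
 * GE (N == V) and 3 like GT (!Z && N == V), matching the VSEL condition
 * encoding.
 */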

static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);