/*
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
28 #include "disas/disas.h"
31 #include "qemu/bitops.h"
37 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39 /* currently all emulated v5 cores are also v5TE, so don't bother */
40 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
41 #define ENABLE_ARCH_5J 0
42 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
48 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
50 #include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
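/* Worked example of the mask/xor/sub sequence above (illustrative only):
 * for an 8-bit field the sign bit constant is 0x80.  A field value of 0xff
 * becomes 0xff ^ 0x80 = 0x7f, then 0x7f - 0x80 = 0xffffffff, i.e. -1, while
 * a positive value such as 0x7f maps back to itself (0x7f ^ 0x80 = 0xff,
 * 0xff - 0x80 = 0x7f).  The two operations therefore sign-extend the
 * masked field without a conditional.
 */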
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
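/* Note on the sequence above: clearing bit 15 of both inputs before the
 * single 32-bit add keeps a carry out of the low halfword from spilling
 * into the high halfword, and the final xor with the saved bit-15
 * difference restores the correct top bit of each 16-bit sum.  For
 * example, with low halves 0x8000 + 0x8000 the low result is 0x0000 and
 * the high halves see only their own carry-free addition.
 */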
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
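/* The overflow computation above follows the usual rule: V is set when the
 * operands have the same sign but the result's sign differs, i.e.
 * VF = (result ^ t0) & ~(t0 ^ t1), with the flag kept in bit 31.  QEMU's
 * ARM flags live in separate variables: NF and VF are read from bit 31,
 * ZF is "set" when the variable equals zero, and CF holds 0 or 1.
 */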
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
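/* Two add2 steps are used above because t0 + CF and the subsequent + t1 can
 * each produce a carry; since the total t0 + t1 + CF fits in 33 bits, at
 * most one of the two carries is ever set, so accumulating them in CF still
 * yields a 0/1 carry flag.  The 64-bit fallback path computes the same sum
 * directly and splits it back into result (NF) and carry (CF).
 */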
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
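/* ARM subtraction sets C as "NOT borrow": gen_sub_CC above computes it as
 * t0 >= t1 (unsigned), and SBC is expressed as t0 + ~t1 + CF so that the
 * same add-with-carry flag logic can be reused.
 */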
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
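/* Condition codes 10-13 above compare N with V by testing the sign of
 * NF ^ VF: both flags are stored in bit 31 of their variables, so the xor
 * is negative exactly when N != V.  The "hi" and "gt" cases use a local
 * label (inv) to skip the taken branch when the inverted condition holds.
 */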
/* 1 for each data-processing op that sets flags from a logical result
   (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN).  */
static const uint8_t table_logic_cc[16] = {
    1, /* and  */
    1, /* xor  */
    0, /* sub  */
    0, /* rsb  */
    0, /* add  */
    0, /* adc  */
    0, /* sbc  */
    0, /* rsc  */
    1, /* andl */
    1, /* xorl */
    0, /* cmp  */
    0, /* cmn  */
    1, /* orr  */
    1, /* mov  */
    1, /* bic  */
    1, /* mvn  */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC);                          \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_st_i32(val, addr, index, OPC);                          \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}
#else

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
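/* The DO_GEN_LD/DO_GEN_ST expansions above provide per-size accessors such
 * as gen_aa32_ld16u() or gen_aa32_st8().  A typical (illustrative) call
 * site looks like:
 *
 *     tmp = tcg_temp_new_i32();
 *     gen_aa32_ld16u(tmp, addr, IS_USER(s));
 *
 * where the final argument is the MMU index to use for the access.
 */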
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
    }
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
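/* Single-precision register Sn is stored in one half of the double D(n>>1),
 * which is why odd-numbered regs use l.upper above (CPU_DoubleU hides the
 * host endianness).  For NEON, neon_reg_offset(reg, n) picks 32-bit piece n
 * of Dreg; e.g. neon_reg_offset(1, 1) is the most significant word of D1.
 */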
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
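/* For example, IWMMXT_OP_ENV_SIZE(cmpeq) expands (via IWMMXT_OP_ENV) into
 * gen_op_iwmmxt_cmpeqb_M0_wRn(), gen_op_iwmmxt_cmpeqw_M0_wRn() and
 * gen_op_iwmmxt_cmpeql_M0_wRn(), each loading wRn into cpu_V1 and calling
 * the matching gen_helper_iwmmxt_* helper with cpu_env, cpu_M0 and cpu_V1.
 */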
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;
1451 if ((insn
& 0x0e000e00) == 0x0c000000) {
1452 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1454 rdlo
= (insn
>> 12) & 0xf;
1455 rdhi
= (insn
>> 16) & 0xf;
1456 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1457 iwmmxt_load_reg(cpu_V0
, wrd
);
1458 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1459 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1460 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1461 } else { /* TMCRR */
1462 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1463 iwmmxt_store_reg(cpu_V0
, wrd
);
1464 gen_op_iwmmxt_set_mup();
1469 wrd
= (insn
>> 12) & 0xf;
1470 addr
= tcg_temp_new_i32();
1471 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1472 tcg_temp_free_i32(addr
);
1475 if (insn
& ARM_CP_RW_BIT
) {
1476 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1477 tmp
= tcg_temp_new_i32();
1478 gen_aa32_ld32u(tmp
, addr
, IS_USER(s
));
1479 iwmmxt_store_creg(wrd
, tmp
);
1482 if (insn
& (1 << 8)) {
1483 if (insn
& (1 << 22)) { /* WLDRD */
1484 gen_aa32_ld64(cpu_M0
, addr
, IS_USER(s
));
1486 } else { /* WLDRW wRd */
1487 tmp
= tcg_temp_new_i32();
1488 gen_aa32_ld32u(tmp
, addr
, IS_USER(s
));
1491 tmp
= tcg_temp_new_i32();
1492 if (insn
& (1 << 22)) { /* WLDRH */
1493 gen_aa32_ld16u(tmp
, addr
, IS_USER(s
));
1494 } else { /* WLDRB */
1495 gen_aa32_ld8u(tmp
, addr
, IS_USER(s
));
1499 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1500 tcg_temp_free_i32(tmp
);
1502 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1505 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1506 tmp
= iwmmxt_load_creg(wrd
);
1507 gen_aa32_st32(tmp
, addr
, IS_USER(s
));
1509 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1510 tmp
= tcg_temp_new_i32();
1511 if (insn
& (1 << 8)) {
1512 if (insn
& (1 << 22)) { /* WSTRD */
1513 gen_aa32_st64(cpu_M0
, addr
, IS_USER(s
));
1514 } else { /* WSTRW wRd */
1515 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1516 gen_aa32_st32(tmp
, addr
, IS_USER(s
));
1519 if (insn
& (1 << 22)) { /* WSTRH */
1520 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1521 gen_aa32_st16(tmp
, addr
, IS_USER(s
));
1522 } else { /* WSTRB */
1523 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1524 gen_aa32_st8(tmp
, addr
, IS_USER(s
));
1528 tcg_temp_free_i32(tmp
);
1530 tcg_temp_free_i32(addr
);
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
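        /* The cases below mostly follow the WOR pattern: load one wRn
         * operand into M0, combine it with the second operand, write M0
         * back to the destination register, and then flag the update by
         * setting the wCon status bits via gen_op_iwmmxt_set_mup() /
         * gen_op_iwmmxt_set_cup().
         */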
1549 case 0x011: /* TMCR */
1552 rd
= (insn
>> 12) & 0xf;
1553 wrd
= (insn
>> 16) & 0xf;
1555 case ARM_IWMMXT_wCID
:
1556 case ARM_IWMMXT_wCASF
:
1558 case ARM_IWMMXT_wCon
:
1559 gen_op_iwmmxt_set_cup();
1561 case ARM_IWMMXT_wCSSF
:
1562 tmp
= iwmmxt_load_creg(wrd
);
1563 tmp2
= load_reg(s
, rd
);
1564 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1565 tcg_temp_free_i32(tmp2
);
1566 iwmmxt_store_creg(wrd
, tmp
);
1568 case ARM_IWMMXT_wCGR0
:
1569 case ARM_IWMMXT_wCGR1
:
1570 case ARM_IWMMXT_wCGR2
:
1571 case ARM_IWMMXT_wCGR3
:
1572 gen_op_iwmmxt_set_cup();
1573 tmp
= load_reg(s
, rd
);
1574 iwmmxt_store_creg(wrd
, tmp
);
1580 case 0x100: /* WXOR */
1581 wrd
= (insn
>> 12) & 0xf;
1582 rd0
= (insn
>> 0) & 0xf;
1583 rd1
= (insn
>> 16) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1585 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1586 gen_op_iwmmxt_setpsr_nz();
1587 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1588 gen_op_iwmmxt_set_mup();
1589 gen_op_iwmmxt_set_cup();
1591 case 0x111: /* TMRC */
1594 rd
= (insn
>> 12) & 0xf;
1595 wrd
= (insn
>> 16) & 0xf;
1596 tmp
= iwmmxt_load_creg(wrd
);
1597 store_reg(s
, rd
, tmp
);
1599 case 0x300: /* WANDN */
1600 wrd
= (insn
>> 12) & 0xf;
1601 rd0
= (insn
>> 0) & 0xf;
1602 rd1
= (insn
>> 16) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1604 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1605 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1606 gen_op_iwmmxt_setpsr_nz();
1607 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1608 gen_op_iwmmxt_set_mup();
1609 gen_op_iwmmxt_set_cup();
1611 case 0x200: /* WAND */
1612 wrd
= (insn
>> 12) & 0xf;
1613 rd0
= (insn
>> 0) & 0xf;
1614 rd1
= (insn
>> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1616 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1617 gen_op_iwmmxt_setpsr_nz();
1618 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1622 case 0x810: case 0xa10: /* WMADD */
1623 wrd
= (insn
>> 12) & 0xf;
1624 rd0
= (insn
>> 0) & 0xf;
1625 rd1
= (insn
>> 16) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1627 if (insn
& (1 << 21))
1628 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1630 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1631 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1632 gen_op_iwmmxt_set_mup();
1634 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1635 wrd
= (insn
>> 12) & 0xf;
1636 rd0
= (insn
>> 16) & 0xf;
1637 rd1
= (insn
>> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1639 switch ((insn
>> 22) & 3) {
1641 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1644 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1647 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1652 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1656 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1657 wrd
= (insn
>> 12) & 0xf;
1658 rd0
= (insn
>> 16) & 0xf;
1659 rd1
= (insn
>> 0) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1661 switch ((insn
>> 22) & 3) {
1663 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1666 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1669 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1674 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1675 gen_op_iwmmxt_set_mup();
1676 gen_op_iwmmxt_set_cup();
1678 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1679 wrd
= (insn
>> 12) & 0xf;
1680 rd0
= (insn
>> 16) & 0xf;
1681 rd1
= (insn
>> 0) & 0xf;
1682 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1683 if (insn
& (1 << 22))
1684 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1686 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1687 if (!(insn
& (1 << 20)))
1688 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1690 gen_op_iwmmxt_set_mup();
1692 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1693 wrd
= (insn
>> 12) & 0xf;
1694 rd0
= (insn
>> 16) & 0xf;
1695 rd1
= (insn
>> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1697 if (insn
& (1 << 21)) {
1698 if (insn
& (1 << 20))
1699 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1701 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1703 if (insn
& (1 << 20))
1704 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1706 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1708 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1709 gen_op_iwmmxt_set_mup();
1711 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1712 wrd
= (insn
>> 12) & 0xf;
1713 rd0
= (insn
>> 16) & 0xf;
1714 rd1
= (insn
>> 0) & 0xf;
1715 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1716 if (insn
& (1 << 21))
1717 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1719 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1720 if (!(insn
& (1 << 20))) {
1721 iwmmxt_load_reg(cpu_V1
, wrd
);
1722 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1724 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1725 gen_op_iwmmxt_set_mup();
1727 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1728 wrd
= (insn
>> 12) & 0xf;
1729 rd0
= (insn
>> 16) & 0xf;
1730 rd1
= (insn
>> 0) & 0xf;
1731 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1732 switch ((insn
>> 22) & 3) {
1734 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1737 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1740 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1745 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1746 gen_op_iwmmxt_set_mup();
1747 gen_op_iwmmxt_set_cup();
1749 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1750 wrd
= (insn
>> 12) & 0xf;
1751 rd0
= (insn
>> 16) & 0xf;
1752 rd1
= (insn
>> 0) & 0xf;
1753 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1754 if (insn
& (1 << 22)) {
1755 if (insn
& (1 << 20))
1756 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1758 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1760 if (insn
& (1 << 20))
1761 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1763 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1765 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1766 gen_op_iwmmxt_set_mup();
1767 gen_op_iwmmxt_set_cup();
1769 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1770 wrd
= (insn
>> 12) & 0xf;
1771 rd0
= (insn
>> 16) & 0xf;
1772 rd1
= (insn
>> 0) & 0xf;
1773 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1774 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1775 tcg_gen_andi_i32(tmp
, tmp
, 7);
1776 iwmmxt_load_reg(cpu_V1
, rd1
);
1777 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1778 tcg_temp_free_i32(tmp
);
1779 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1780 gen_op_iwmmxt_set_mup();
1782 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1783 if (((insn
>> 6) & 3) == 3)
1785 rd
= (insn
>> 12) & 0xf;
1786 wrd
= (insn
>> 16) & 0xf;
1787 tmp
= load_reg(s
, rd
);
1788 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1789 switch ((insn
>> 6) & 3) {
1791 tmp2
= tcg_const_i32(0xff);
1792 tmp3
= tcg_const_i32((insn
& 7) << 3);
1795 tmp2
= tcg_const_i32(0xffff);
1796 tmp3
= tcg_const_i32((insn
& 3) << 4);
1799 tmp2
= tcg_const_i32(0xffffffff);
1800 tmp3
= tcg_const_i32((insn
& 1) << 5);
1803 TCGV_UNUSED_I32(tmp2
);
1804 TCGV_UNUSED_I32(tmp3
);
1806 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1807 tcg_temp_free_i32(tmp3
);
1808 tcg_temp_free_i32(tmp2
);
1809 tcg_temp_free_i32(tmp
);
1810 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1811 gen_op_iwmmxt_set_mup();
1813 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1814 rd
= (insn
>> 12) & 0xf;
1815 wrd
= (insn
>> 16) & 0xf;
1816 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1818 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1819 tmp
= tcg_temp_new_i32();
1820 switch ((insn
>> 22) & 3) {
1822 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1823 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1825 tcg_gen_ext8s_i32(tmp
, tmp
);
1827 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1831 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1832 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1834 tcg_gen_ext16s_i32(tmp
, tmp
);
1836 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1840 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1841 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1844 store_reg(s
, rd
, tmp
);
1846 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1847 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1849 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1850 switch ((insn
>> 22) & 3) {
1852 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1855 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1858 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1861 tcg_gen_shli_i32(tmp
, tmp
, 28);
1863 tcg_temp_free_i32(tmp
);
1865 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1866 if (((insn
>> 6) & 3) == 3)
1868 rd
= (insn
>> 12) & 0xf;
1869 wrd
= (insn
>> 16) & 0xf;
1870 tmp
= load_reg(s
, rd
);
1871 switch ((insn
>> 6) & 3) {
1873 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1876 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1879 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1882 tcg_temp_free_i32(tmp
);
1883 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1884 gen_op_iwmmxt_set_mup();
1886 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1887 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1889 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1890 tmp2
= tcg_temp_new_i32();
1891 tcg_gen_mov_i32(tmp2
, tmp
);
1892 switch ((insn
>> 22) & 3) {
1894 for (i
= 0; i
< 7; i
++) {
1895 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1896 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1900 for (i
= 0; i
< 3; i
++) {
1901 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1902 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1906 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1907 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1911 tcg_temp_free_i32(tmp2
);
1912 tcg_temp_free_i32(tmp
);
1914 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1915 wrd
= (insn
>> 12) & 0xf;
1916 rd0
= (insn
>> 16) & 0xf;
1917 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1918 switch ((insn
>> 22) & 3) {
1920 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1923 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1926 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1931 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1932 gen_op_iwmmxt_set_mup();
1934 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1935 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1937 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1938 tmp2
= tcg_temp_new_i32();
1939 tcg_gen_mov_i32(tmp2
, tmp
);
1940 switch ((insn
>> 22) & 3) {
1942 for (i
= 0; i
< 7; i
++) {
1943 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1944 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1948 for (i
= 0; i
< 3; i
++) {
1949 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1950 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1954 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1955 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1959 tcg_temp_free_i32(tmp2
);
1960 tcg_temp_free_i32(tmp
);
1962 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1963 rd
= (insn
>> 12) & 0xf;
1964 rd0
= (insn
>> 16) & 0xf;
1965 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1967 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1968 tmp
= tcg_temp_new_i32();
1969 switch ((insn
>> 22) & 3) {
1971 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1974 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1977 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1980 store_reg(s
, rd
, tmp
);
1982 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1983 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1984 wrd
= (insn
>> 12) & 0xf;
1985 rd0
= (insn
>> 16) & 0xf;
1986 rd1
= (insn
>> 0) & 0xf;
1987 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1988 switch ((insn
>> 22) & 3) {
1990 if (insn
& (1 << 21))
1991 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1993 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1996 if (insn
& (1 << 21))
1997 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1999 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2002 if (insn
& (1 << 21))
2003 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2005 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2010 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2011 gen_op_iwmmxt_set_mup();
2012 gen_op_iwmmxt_set_cup();
2014 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2015 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2016 wrd
= (insn
>> 12) & 0xf;
2017 rd0
= (insn
>> 16) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2019 switch ((insn
>> 22) & 3) {
2021 if (insn
& (1 << 21))
2022 gen_op_iwmmxt_unpacklsb_M0();
2024 gen_op_iwmmxt_unpacklub_M0();
2027 if (insn
& (1 << 21))
2028 gen_op_iwmmxt_unpacklsw_M0();
2030 gen_op_iwmmxt_unpackluw_M0();
2033 if (insn
& (1 << 21))
2034 gen_op_iwmmxt_unpacklsl_M0();
2036 gen_op_iwmmxt_unpacklul_M0();
2041 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2042 gen_op_iwmmxt_set_mup();
2043 gen_op_iwmmxt_set_cup();
2045 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2046 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2047 wrd
= (insn
>> 12) & 0xf;
2048 rd0
= (insn
>> 16) & 0xf;
2049 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2050 switch ((insn
>> 22) & 3) {
2052 if (insn
& (1 << 21))
2053 gen_op_iwmmxt_unpackhsb_M0();
2055 gen_op_iwmmxt_unpackhub_M0();
2058 if (insn
& (1 << 21))
2059 gen_op_iwmmxt_unpackhsw_M0();
2061 gen_op_iwmmxt_unpackhuw_M0();
2064 if (insn
& (1 << 21))
2065 gen_op_iwmmxt_unpackhsl_M0();
2067 gen_op_iwmmxt_unpackhul_M0();
2072 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2076 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2077 case 0x214: case 0x614: case 0xa14: case 0xe14:
2078 if (((insn
>> 22) & 3) == 0)
2080 wrd
= (insn
>> 12) & 0xf;
2081 rd0
= (insn
>> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2083 tmp
= tcg_temp_new_i32();
2084 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2085 tcg_temp_free_i32(tmp
);
2088 switch ((insn
>> 22) & 3) {
2090 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2093 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2096 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2099 tcg_temp_free_i32(tmp
);
2100 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2101 gen_op_iwmmxt_set_mup();
2102 gen_op_iwmmxt_set_cup();
2104 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2105 case 0x014: case 0x414: case 0x814: case 0xc14:
2106 if (((insn
>> 22) & 3) == 0)
2108 wrd
= (insn
>> 12) & 0xf;
2109 rd0
= (insn
>> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2111 tmp
= tcg_temp_new_i32();
2112 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2113 tcg_temp_free_i32(tmp
);
2116 switch ((insn
>> 22) & 3) {
2118 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2121 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2124 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2127 tcg_temp_free_i32(tmp
);
2128 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2129 gen_op_iwmmxt_set_mup();
2130 gen_op_iwmmxt_set_cup();
2132 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2133 case 0x114: case 0x514: case 0x914: case 0xd14:
2134 if (((insn
>> 22) & 3) == 0)
2136 wrd
= (insn
>> 12) & 0xf;
2137 rd0
= (insn
>> 16) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2139 tmp
= tcg_temp_new_i32();
2140 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2141 tcg_temp_free_i32(tmp
);
2144 switch ((insn
>> 22) & 3) {
2146 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2149 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2152 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2155 tcg_temp_free_i32(tmp
);
2156 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2157 gen_op_iwmmxt_set_mup();
2158 gen_op_iwmmxt_set_cup();
2160 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2161 case 0x314: case 0x714: case 0xb14: case 0xf14:
2162 if (((insn
>> 22) & 3) == 0)
2164 wrd
= (insn
>> 12) & 0xf;
2165 rd0
= (insn
>> 16) & 0xf;
2166 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2167 tmp
= tcg_temp_new_i32();
2168 switch ((insn
>> 22) & 3) {
2170 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2171 tcg_temp_free_i32(tmp
);
2174 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2177 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2178 tcg_temp_free_i32(tmp
);
2181 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2184 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2185 tcg_temp_free_i32(tmp
);
2188 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2191 tcg_temp_free_i32(tmp
);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2193 gen_op_iwmmxt_set_mup();
2194 gen_op_iwmmxt_set_cup();
2196 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2197 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2198 wrd
= (insn
>> 12) & 0xf;
2199 rd0
= (insn
>> 16) & 0xf;
2200 rd1
= (insn
>> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2202 switch ((insn
>> 22) & 3) {
2204 if (insn
& (1 << 21))
2205 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2207 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2210 if (insn
& (1 << 21))
2211 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2213 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2216 if (insn
& (1 << 21))
2217 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2219 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2224 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2225 gen_op_iwmmxt_set_mup();
2227 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2228 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2229 wrd
= (insn
>> 12) & 0xf;
2230 rd0
= (insn
>> 16) & 0xf;
2231 rd1
= (insn
>> 0) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2233 switch ((insn
>> 22) & 3) {
2235 if (insn
& (1 << 21))
2236 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2238 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2241 if (insn
& (1 << 21))
2242 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2244 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2247 if (insn
& (1 << 21))
2248 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2250 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2255 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2256 gen_op_iwmmxt_set_mup();
2258 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2259 case 0x402: case 0x502: case 0x602: case 0x702:
2260 wrd
= (insn
>> 12) & 0xf;
2261 rd0
= (insn
>> 16) & 0xf;
2262 rd1
= (insn
>> 0) & 0xf;
2263 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2264 tmp
= tcg_const_i32((insn
>> 20) & 3);
2265 iwmmxt_load_reg(cpu_V1
, rd1
);
2266 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2267 tcg_temp_free_i32(tmp
);
2268 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2269 gen_op_iwmmxt_set_mup();
2271 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2272 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2273 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2274 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2275 wrd
= (insn
>> 12) & 0xf;
2276 rd0
= (insn
>> 16) & 0xf;
2277 rd1
= (insn
>> 0) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2279 switch ((insn
>> 20) & 0xf) {
2281 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2284 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2287 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2290 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2293 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2296 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2299 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2302 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2305 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2310 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2314 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2315 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2316 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2317 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2318 wrd
= (insn
>> 12) & 0xf;
2319 rd0
= (insn
>> 16) & 0xf;
2320 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2321 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2322 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2323 tcg_temp_free_i32(tmp
);
2324 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2328 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2329 case 0x418: case 0x518: case 0x618: case 0x718:
2330 case 0x818: case 0x918: case 0xa18: case 0xb18:
2331 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2332 wrd
= (insn
>> 12) & 0xf;
2333 rd0
= (insn
>> 16) & 0xf;
2334 rd1
= (insn
>> 0) & 0xf;
2335 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2336 switch ((insn
>> 20) & 0xf) {
2338 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2341 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2344 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2347 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2350 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2353 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2356 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2359 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2362 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2367 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2368 gen_op_iwmmxt_set_mup();
2369 gen_op_iwmmxt_set_cup();
    case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;
    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) { /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            /* The accumulator is only 40 bits wide, so keep just
               bits [39:32] in rdhi.  */
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else { /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
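
/* For example, VFP_SREG_D(insn) yields the single-precision register
 * number (insn[15:12] << 1) | insn[22], and with VFP3 VFP_DREG_D(reg, insn)
 * yields (insn[22] << 4) | insn[15:12].  Without VFP3 only D0..D15 exist,
 * so a set "smallbit" makes the enclosing decoder return 1 (UNDEF).
 */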
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
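/* e.g. gen_neon_dup_u8 with shift == 0 turns 0x000000ab into 0xabababab. */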
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
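/* e.g. gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd. */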
static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
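/* e.g. gen_neon_dup_high16 turns 0xabcd0000 into 0xabcdabcd. */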
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
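
/* The condition tests in handle_vsel() below rely on how the flags are
 * kept in TCG globals: cpu_ZF is zero exactly when the Z flag is set,
 * while cpu_NF and cpu_VF hold the N and V flags in bit 31, so sign
 * comparisons against zero recover the architectural flag values.
 */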
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
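
/* The minnum/maxnum helpers used above follow the IEEE 754-2008
 * minNum/maxNum semantics required by VMINNM/VMAXNM: when exactly one
 * operand is a quiet NaN, the numerical operand is returned instead of
 * the NaN.
 */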
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
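
/* Both handle_vrint() above and handle_vcvt() below temporarily override
 * the FP rounding mode: the first gen_helper_set_rmode call installs the
 * mode required by the instruction and returns the previous mode in
 * tcg_rmode, and the second call restores that saved mode.
 */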
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);
    return 0;
}
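
/* In handle_vcvt() the shift operand passed to the vfp_to[su]l* helpers
 * is the constant zero: VCVTA/VCVTN/VCVTP/VCVTM convert to a plain
 * 32-bit integer, so no fixed-point fraction bits are involved.
 */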
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
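
/* Indices 0..3 correspond to the RM field values used by VRINT{A,N,P,M}
 * and VCVT{A,N,P,M}: round to nearest with ties away, ties to even,
 * towards +infinity and towards -infinity respectively.
 */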
static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(env, s, insn);
    }

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else {
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_MVFR))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);