target-arm: Simplify insn_crosses_page()
[qemu.git] / target/arm/translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
31 #include "exec/semihost.h"
32
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
35
36 #include "trace-tcg.h"
37 #include "exec/log.h"
38
39
40 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
42 /* currently all emulated v5 cores are also v5TE, so don't bother */
43 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
44 #define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
45 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
50
51 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
52
53 #include "translate.h"
54
55 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) 1
57 #else
58 #define IS_USER(s) (s->user)
59 #endif
60
61 TCGv_env cpu_env;
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
68
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
72
73 #include "exec/gen-icount.h"
74
75 static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
79 /* initialize TCG globals. */
80 void arm_translate_init(void)
81 {
82 int i;
83
84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
85 tcg_ctx.tcg_env = cpu_env;
86
87 for (i = 0; i < 16; i++) {
88 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
89 offsetof(CPUARMState, regs[i]),
90 regnames[i]);
91 }
92 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
93 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
94 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
95 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96
97 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
98 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
99 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
100 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101
102 a64_translate_init();
103 }
104
105 /* Flags for the disas_set_da_iss info argument:
106 * lower bits hold the Rt register number, higher bits are flags.
107 */
108 typedef enum ISSInfo {
109 ISSNone = 0,
110 ISSRegMask = 0x1f,
111 ISSInvalid = (1 << 5),
112 ISSIsAcqRel = (1 << 6),
113 ISSIsWrite = (1 << 7),
114 ISSIs16Bit = (1 << 8),
115 } ISSInfo;
116
117 /* Save the syndrome information for a Data Abort */
118 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
119 {
120 uint32_t syn;
121 int sas = memop & MO_SIZE;
122 bool sse = memop & MO_SIGN;
123 bool is_acqrel = issinfo & ISSIsAcqRel;
124 bool is_write = issinfo & ISSIsWrite;
125 bool is_16bit = issinfo & ISSIs16Bit;
126 int srt = issinfo & ISSRegMask;
127
128 if (issinfo & ISSInvalid) {
129 /* Some callsites want to conditionally provide ISS info,
130 * eg "only if this was not a writeback"
131 */
132 return;
133 }
134
135 if (srt == 15) {
136 /* For AArch32, insns where the src/dest is R15 never generate
137 * ISS information. Catching that here saves checking at all
138 * the call sites.
139 */
140 return;
141 }
142
143 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
144 0, 0, 0, is_write, 0, is_16bit);
145 disas_set_insn_syndrome(s, syn);
146 }
147
148 static inline int get_a32_user_mem_index(DisasContext *s)
149 {
150 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
151 * insns:
152 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
153 * otherwise, access as if at PL0.
154 */
155 switch (s->mmu_idx) {
156 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
157 case ARMMMUIdx_S12NSE0:
158 case ARMMMUIdx_S12NSE1:
159 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
160 case ARMMMUIdx_S1E3:
161 case ARMMMUIdx_S1SE0:
162 case ARMMMUIdx_S1SE1:
163 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
164 case ARMMMUIdx_MUser:
165 case ARMMMUIdx_MPriv:
166 case ARMMMUIdx_MNegPri:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MSUser:
169 case ARMMMUIdx_MSPriv:
170 case ARMMMUIdx_MSNegPri:
171 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
172 case ARMMMUIdx_S2NS:
173 default:
174 g_assert_not_reached();
175 }
176 }
177
178 static inline TCGv_i32 load_cpu_offset(int offset)
179 {
180 TCGv_i32 tmp = tcg_temp_new_i32();
181 tcg_gen_ld_i32(tmp, cpu_env, offset);
182 return tmp;
183 }
184
185 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
186
187 static inline void store_cpu_offset(TCGv_i32 var, int offset)
188 {
189 tcg_gen_st_i32(var, cpu_env, offset);
190 tcg_temp_free_i32(var);
191 }
192
193 #define store_cpu_field(var, name) \
194 store_cpu_offset(var, offsetof(CPUARMState, name))
195
196 /* Set a variable to the value of a CPU register. */
197 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
198 {
199 if (reg == 15) {
200 uint32_t addr;
201         /* s->pc already points past this insn; the architectural PC value
               (insn address + 8 in ARM state, + 4 in Thumb) is one insn further on. */
202 if (s->thumb)
203 addr = (long)s->pc + 2;
204 else
205 addr = (long)s->pc + 4;
206 tcg_gen_movi_i32(var, addr);
207 } else {
208 tcg_gen_mov_i32(var, cpu_R[reg]);
209 }
210 }
211
212 /* Create a new temporary and set it to the value of a CPU register. */
213 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
214 {
215 TCGv_i32 tmp = tcg_temp_new_i32();
216 load_reg_var(s, tmp, reg);
217 return tmp;
218 }
219
220 /* Set a CPU register. The source must be a temporary and will be
221 marked as dead. */
222 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
223 {
224 if (reg == 15) {
225 /* In Thumb mode, we must ignore bit 0.
226 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
227 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
228 * We choose to ignore [1:0] in ARM mode for all architecture versions.
229 */
230 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
231 s->base.is_jmp = DISAS_JUMP;
232 }
233 tcg_gen_mov_i32(cpu_R[reg], var);
234 tcg_temp_free_i32(var);
235 }
236
237 /* Value extensions. */
238 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
239 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
240 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
241 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
242
243 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
244 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
245
246
247 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
248 {
249 TCGv_i32 tmp_mask = tcg_const_i32(mask);
250 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
251 tcg_temp_free_i32(tmp_mask);
252 }
253 /* Set NZCV flags from the high 4 bits of var. */
254 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
255
256 static void gen_exception_internal(int excp)
257 {
258 TCGv_i32 tcg_excp = tcg_const_i32(excp);
259
260 assert(excp_is_internal(excp));
261 gen_helper_exception_internal(cpu_env, tcg_excp);
262 tcg_temp_free_i32(tcg_excp);
263 }
264
265 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
266 {
267 TCGv_i32 tcg_excp = tcg_const_i32(excp);
268 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
269 TCGv_i32 tcg_el = tcg_const_i32(target_el);
270
271 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
272 tcg_syn, tcg_el);
273
274 tcg_temp_free_i32(tcg_el);
275 tcg_temp_free_i32(tcg_syn);
276 tcg_temp_free_i32(tcg_excp);
277 }
278
279 static void gen_ss_advance(DisasContext *s)
280 {
281 /* If the singlestep state is Active-not-pending, advance to
282 * Active-pending.
283 */
284 if (s->ss_active) {
285 s->pstate_ss = 0;
286 gen_helper_clear_pstate_ss(cpu_env);
287 }
288 }
289
290 static void gen_step_complete_exception(DisasContext *s)
291 {
292     /* We have just completed a single step of an insn. Move from Active-not-pending
293 * to Active-pending, and then also take the swstep exception.
294 * This corresponds to making the (IMPDEF) choice to prioritize
295 * swstep exceptions over asynchronous exceptions taken to an exception
296 * level where debug is disabled. This choice has the advantage that
297 * we do not need to maintain internal state corresponding to the
298 * ISV/EX syndrome bits between completion of the step and generation
299 * of the exception, and our syndrome information is always correct.
300 */
301 gen_ss_advance(s);
302 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
303 default_exception_el(s));
304 s->base.is_jmp = DISAS_NORETURN;
305 }
306
307 static void gen_singlestep_exception(DisasContext *s)
308 {
309 /* Generate the right kind of exception for singlestep, which is
310 * either the architectural singlestep or EXCP_DEBUG for QEMU's
311 * gdb singlestepping.
312 */
313 if (s->ss_active) {
314 gen_step_complete_exception(s);
315 } else {
316 gen_exception_internal(EXCP_DEBUG);
317 }
318 }
319
320 static inline bool is_singlestepping(DisasContext *s)
321 {
322 /* Return true if we are singlestepping either because of
323 * architectural singlestep or QEMU gdbstub singlestep. This does
324 * not include the command line '-singlestep' mode which is rather
325 * misnamed as it only means "one instruction per TB" and doesn't
326 * affect the code we generate.
327 */
328 return s->base.singlestep_enabled || s->ss_active;
329 }
330
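/* Dual 16x16->32 signed multiply: a = lo(a)*lo(b), b = hi(a)*hi(b). */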
331 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
332 {
333 TCGv_i32 tmp1 = tcg_temp_new_i32();
334 TCGv_i32 tmp2 = tcg_temp_new_i32();
335 tcg_gen_ext16s_i32(tmp1, a);
336 tcg_gen_ext16s_i32(tmp2, b);
337 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
338 tcg_temp_free_i32(tmp2);
339 tcg_gen_sari_i32(a, a, 16);
340 tcg_gen_sari_i32(b, b, 16);
341 tcg_gen_mul_i32(b, b, a);
342 tcg_gen_mov_i32(a, tmp1);
343 tcg_temp_free_i32(tmp1);
344 }
345
346 /* Byteswap each halfword. */
347 static void gen_rev16(TCGv_i32 var)
348 {
349 TCGv_i32 tmp = tcg_temp_new_i32();
350 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
351 tcg_gen_shri_i32(tmp, var, 8);
352 tcg_gen_and_i32(tmp, tmp, mask);
353 tcg_gen_and_i32(var, var, mask);
354 tcg_gen_shli_i32(var, var, 8);
355 tcg_gen_or_i32(var, var, tmp);
356 tcg_temp_free_i32(mask);
357 tcg_temp_free_i32(tmp);
358 }
359
360 /* Byteswap low halfword and sign extend. */
361 static void gen_revsh(TCGv_i32 var)
362 {
363 tcg_gen_ext16u_i32(var, var);
364 tcg_gen_bswap16_i32(var, var);
365 tcg_gen_ext16s_i32(var, var);
366 }
367
368 /* Return (b << 32) + a. Mark inputs as dead */
369 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
370 {
371 TCGv_i64 tmp64 = tcg_temp_new_i64();
372
373 tcg_gen_extu_i32_i64(tmp64, b);
374 tcg_temp_free_i32(b);
375 tcg_gen_shli_i64(tmp64, tmp64, 32);
376 tcg_gen_add_i64(a, tmp64, a);
377
378 tcg_temp_free_i64(tmp64);
379 return a;
380 }
381
382 /* Return (b << 32) - a. Mark inputs as dead. */
383 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
384 {
385 TCGv_i64 tmp64 = tcg_temp_new_i64();
386
387 tcg_gen_extu_i32_i64(tmp64, b);
388 tcg_temp_free_i32(b);
389 tcg_gen_shli_i64(tmp64, tmp64, 32);
390 tcg_gen_sub_i64(a, tmp64, a);
391
392 tcg_temp_free_i64(tmp64);
393 return a;
394 }
395
396 /* 32x32->64 multiply. Marks inputs as dead. */
397 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
398 {
399 TCGv_i32 lo = tcg_temp_new_i32();
400 TCGv_i32 hi = tcg_temp_new_i32();
401 TCGv_i64 ret;
402
403 tcg_gen_mulu2_i32(lo, hi, a, b);
404 tcg_temp_free_i32(a);
405 tcg_temp_free_i32(b);
406
407 ret = tcg_temp_new_i64();
408 tcg_gen_concat_i32_i64(ret, lo, hi);
409 tcg_temp_free_i32(lo);
410 tcg_temp_free_i32(hi);
411
412 return ret;
413 }
414
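/* 32x32->64 signed multiply. Marks inputs as dead. */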
415 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
416 {
417 TCGv_i32 lo = tcg_temp_new_i32();
418 TCGv_i32 hi = tcg_temp_new_i32();
419 TCGv_i64 ret;
420
421 tcg_gen_muls2_i32(lo, hi, a, b);
422 tcg_temp_free_i32(a);
423 tcg_temp_free_i32(b);
424
425 ret = tcg_temp_new_i64();
426 tcg_gen_concat_i32_i64(ret, lo, hi);
427 tcg_temp_free_i32(lo);
428 tcg_temp_free_i32(hi);
429
430 return ret;
431 }
432
433 /* Swap low and high halfwords. */
434 static void gen_swap_half(TCGv_i32 var)
435 {
436 TCGv_i32 tmp = tcg_temp_new_i32();
437 tcg_gen_shri_i32(tmp, var, 16);
438 tcg_gen_shli_i32(var, var, 16);
439 tcg_gen_or_i32(var, var, tmp);
440 tcg_temp_free_i32(tmp);
441 }
442
443 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
444 tmp = (t0 ^ t1) & 0x8000;
445 t0 &= ~0x8000;
446 t1 &= ~0x8000;
447 t0 = (t0 + t1) ^ tmp;
448 */
449
450 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
451 {
452 TCGv_i32 tmp = tcg_temp_new_i32();
453 tcg_gen_xor_i32(tmp, t0, t1);
454 tcg_gen_andi_i32(tmp, tmp, 0x8000);
455 tcg_gen_andi_i32(t0, t0, ~0x8000);
456 tcg_gen_andi_i32(t1, t1, ~0x8000);
457 tcg_gen_add_i32(t0, t0, t1);
458 tcg_gen_xor_i32(t0, t0, tmp);
459 tcg_temp_free_i32(tmp);
460 tcg_temp_free_i32(t1);
461 }
462
463 /* Set CF to the top bit of var. */
464 static void gen_set_CF_bit31(TCGv_i32 var)
465 {
466 tcg_gen_shri_i32(cpu_CF, var, 31);
467 }
468
469 /* Set N and Z flags from var. */
470 static inline void gen_logic_CC(TCGv_i32 var)
471 {
472 tcg_gen_mov_i32(cpu_NF, var);
473 tcg_gen_mov_i32(cpu_ZF, var);
474 }
475
476 /* T0 += T1 + CF. */
477 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
478 {
479 tcg_gen_add_i32(t0, t0, t1);
480 tcg_gen_add_i32(t0, t0, cpu_CF);
481 }
482
483 /* dest = T0 + T1 + CF. */
484 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
485 {
486 tcg_gen_add_i32(dest, t0, t1);
487 tcg_gen_add_i32(dest, dest, cpu_CF);
488 }
489
490 /* dest = T0 - T1 + CF - 1. */
491 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
492 {
493 tcg_gen_sub_i32(dest, t0, t1);
494 tcg_gen_add_i32(dest, dest, cpu_CF);
495 tcg_gen_subi_i32(dest, dest, 1);
496 }
497
498 /* dest = T0 + T1. Compute C, N, V and Z flags */
499 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
500 {
501 TCGv_i32 tmp = tcg_temp_new_i32();
502 tcg_gen_movi_i32(tmp, 0);
503 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
504 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
505 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
506 tcg_gen_xor_i32(tmp, t0, t1);
507 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
508 tcg_temp_free_i32(tmp);
509 tcg_gen_mov_i32(dest, cpu_NF);
510 }
511
512 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
513 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
514 {
515 TCGv_i32 tmp = tcg_temp_new_i32();
516 if (TCG_TARGET_HAS_add2_i32) {
517 tcg_gen_movi_i32(tmp, 0);
518 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
519 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
520 } else {
521 TCGv_i64 q0 = tcg_temp_new_i64();
522 TCGv_i64 q1 = tcg_temp_new_i64();
523 tcg_gen_extu_i32_i64(q0, t0);
524 tcg_gen_extu_i32_i64(q1, t1);
525 tcg_gen_add_i64(q0, q0, q1);
526 tcg_gen_extu_i32_i64(q1, cpu_CF);
527 tcg_gen_add_i64(q0, q0, q1);
528 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
529 tcg_temp_free_i64(q0);
530 tcg_temp_free_i64(q1);
531 }
532 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
533 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
534 tcg_gen_xor_i32(tmp, t0, t1);
535 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
536 tcg_temp_free_i32(tmp);
537 tcg_gen_mov_i32(dest, cpu_NF);
538 }
539
540 /* dest = T0 - T1. Compute C, N, V and Z flags */
541 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
542 {
543 TCGv_i32 tmp;
544 tcg_gen_sub_i32(cpu_NF, t0, t1);
545 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
546 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
547 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
548 tmp = tcg_temp_new_i32();
549 tcg_gen_xor_i32(tmp, t0, t1);
550 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
551 tcg_temp_free_i32(tmp);
552 tcg_gen_mov_i32(dest, cpu_NF);
553 }
554
555 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
556 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
557 {
558 TCGv_i32 tmp = tcg_temp_new_i32();
559 tcg_gen_not_i32(tmp, t1);
560 gen_adc_CC(dest, t0, tmp);
561 tcg_temp_free_i32(tmp);
562 }
563
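/* Variable shift by register. ARM uses the low byte of the shift register;
 * counts of 32 or more yield a zero result, which the movcond implements.
 */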
564 #define GEN_SHIFT(name) \
565 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
566 { \
567 TCGv_i32 tmp1, tmp2, tmp3; \
568 tmp1 = tcg_temp_new_i32(); \
569 tcg_gen_andi_i32(tmp1, t1, 0xff); \
570 tmp2 = tcg_const_i32(0); \
571 tmp3 = tcg_const_i32(0x1f); \
572 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
573 tcg_temp_free_i32(tmp3); \
574 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
575 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
576 tcg_temp_free_i32(tmp2); \
577 tcg_temp_free_i32(tmp1); \
578 }
579 GEN_SHIFT(shl)
580 GEN_SHIFT(shr)
581 #undef GEN_SHIFT
582
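/* Variable ASR by register: counts of 32 or more are clamped to 31 so the
 * result is filled with the sign bit, as the architecture requires.
 */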
583 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
584 {
585 TCGv_i32 tmp1, tmp2;
586 tmp1 = tcg_temp_new_i32();
587 tcg_gen_andi_i32(tmp1, t1, 0xff);
588 tmp2 = tcg_const_i32(0x1f);
589 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
590 tcg_temp_free_i32(tmp2);
591 tcg_gen_sar_i32(dest, t0, tmp1);
592 tcg_temp_free_i32(tmp1);
593 }
594
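/* dest = |src|. Note that negating 0x80000000 leaves it unchanged. */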
595 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
596 {
597 TCGv_i32 c0 = tcg_const_i32(0);
598 TCGv_i32 tmp = tcg_temp_new_i32();
599 tcg_gen_neg_i32(tmp, src);
600 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
601 tcg_temp_free_i32(c0);
602 tcg_temp_free_i32(tmp);
603 }
604
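/* Set CF to bit 'shift' of var, i.e. the last bit shifted out by an
 * immediate shift.
 */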
605 static void shifter_out_im(TCGv_i32 var, int shift)
606 {
607 if (shift == 0) {
608 tcg_gen_andi_i32(cpu_CF, var, 1);
609 } else {
610 tcg_gen_shri_i32(cpu_CF, var, shift);
611 if (shift != 31) {
612 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
613 }
614 }
615 }
616
617 /* Shift by immediate. Includes special handling for shift == 0. */
618 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
619 int shift, int flags)
620 {
621 switch (shiftop) {
622 case 0: /* LSL */
623 if (shift != 0) {
624 if (flags)
625 shifter_out_im(var, 32 - shift);
626 tcg_gen_shli_i32(var, var, shift);
627 }
628 break;
629 case 1: /* LSR */
630 if (shift == 0) {
631 if (flags) {
632 tcg_gen_shri_i32(cpu_CF, var, 31);
633 }
634 tcg_gen_movi_i32(var, 0);
635 } else {
636 if (flags)
637 shifter_out_im(var, shift - 1);
638 tcg_gen_shri_i32(var, var, shift);
639 }
640 break;
641 case 2: /* ASR */
642 if (shift == 0)
643 shift = 32;
644 if (flags)
645 shifter_out_im(var, shift - 1);
646 if (shift == 32)
647 shift = 31;
648 tcg_gen_sari_i32(var, var, shift);
649 break;
650 case 3: /* ROR/RRX */
651 if (shift != 0) {
652 if (flags)
653 shifter_out_im(var, shift - 1);
654 tcg_gen_rotri_i32(var, var, shift); break;
655 } else {
656 TCGv_i32 tmp = tcg_temp_new_i32();
657 tcg_gen_shli_i32(tmp, cpu_CF, 31);
658 if (flags)
659 shifter_out_im(var, 0);
660 tcg_gen_shri_i32(var, var, 1);
661 tcg_gen_or_i32(var, var, tmp);
662 tcg_temp_free_i32(tmp);
663 }
664 }
665 }
666
667 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
668 TCGv_i32 shift, int flags)
669 {
670 if (flags) {
671 switch (shiftop) {
672 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
673 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
674 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
675 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
676 }
677 } else {
678 switch (shiftop) {
679 case 0:
680 gen_shl(var, var, shift);
681 break;
682 case 1:
683 gen_shr(var, var, shift);
684 break;
685 case 2:
686 gen_sar(var, var, shift);
687 break;
688 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
689 tcg_gen_rotr_i32(var, var, shift); break;
690 }
691 }
692 tcg_temp_free_i32(shift);
693 }
694
695 #define PAS_OP(pfx) \
696 switch (op2) { \
697 case 0: gen_pas_helper(glue(pfx,add16)); break; \
698 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
699 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
700 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
701 case 4: gen_pas_helper(glue(pfx,add8)); break; \
702 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
703 }
704 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
705 {
706 TCGv_ptr tmp;
707
708 switch (op1) {
709 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
710 case 1:
711 tmp = tcg_temp_new_ptr();
712 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
713 PAS_OP(s)
714 tcg_temp_free_ptr(tmp);
715 break;
716 case 5:
717 tmp = tcg_temp_new_ptr();
718 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
719 PAS_OP(u)
720 tcg_temp_free_ptr(tmp);
721 break;
722 #undef gen_pas_helper
723 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
724 case 2:
725 PAS_OP(q);
726 break;
727 case 3:
728 PAS_OP(sh);
729 break;
730 case 6:
731 PAS_OP(uq);
732 break;
733 case 7:
734 PAS_OP(uh);
735 break;
736 #undef gen_pas_helper
737 }
738 }
739 #undef PAS_OP
740
741 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
742 #define PAS_OP(pfx) \
743 switch (op1) { \
744 case 0: gen_pas_helper(glue(pfx,add8)); break; \
745 case 1: gen_pas_helper(glue(pfx,add16)); break; \
746 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
747 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
748 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
749 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
750 }
751 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
752 {
753 TCGv_ptr tmp;
754
755 switch (op2) {
756 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
757 case 0:
758 tmp = tcg_temp_new_ptr();
759 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
760 PAS_OP(s)
761 tcg_temp_free_ptr(tmp);
762 break;
763 case 4:
764 tmp = tcg_temp_new_ptr();
765 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
766 PAS_OP(u)
767 tcg_temp_free_ptr(tmp);
768 break;
769 #undef gen_pas_helper
770 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
771 case 1:
772 PAS_OP(q);
773 break;
774 case 2:
775 PAS_OP(sh);
776 break;
777 case 5:
778 PAS_OP(uq);
779 break;
780 case 6:
781 PAS_OP(uh);
782 break;
783 #undef gen_pas_helper
784 }
785 }
786 #undef PAS_OP
787
788 /*
789 * Generate a conditional based on ARM condition code cc.
790  * This is common between ARM and AArch64 targets.
791 */
792 void arm_test_cc(DisasCompare *cmp, int cc)
793 {
794 TCGv_i32 value;
795 TCGCond cond;
796 bool global = true;
797
798 switch (cc) {
799 case 0: /* eq: Z */
800 case 1: /* ne: !Z */
801 cond = TCG_COND_EQ;
802 value = cpu_ZF;
803 break;
804
805 case 2: /* cs: C */
806 case 3: /* cc: !C */
807 cond = TCG_COND_NE;
808 value = cpu_CF;
809 break;
810
811 case 4: /* mi: N */
812 case 5: /* pl: !N */
813 cond = TCG_COND_LT;
814 value = cpu_NF;
815 break;
816
817 case 6: /* vs: V */
818 case 7: /* vc: !V */
819 cond = TCG_COND_LT;
820 value = cpu_VF;
821 break;
822
823 case 8: /* hi: C && !Z */
824 case 9: /* ls: !C || Z -> !(C && !Z) */
825 cond = TCG_COND_NE;
826 value = tcg_temp_new_i32();
827 global = false;
828 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
829 ZF is non-zero for !Z; so AND the two subexpressions. */
830 tcg_gen_neg_i32(value, cpu_CF);
831 tcg_gen_and_i32(value, value, cpu_ZF);
832 break;
833
834 case 10: /* ge: N == V -> N ^ V == 0 */
835 case 11: /* lt: N != V -> N ^ V != 0 */
836 /* Since we're only interested in the sign bit, == 0 is >= 0. */
837 cond = TCG_COND_GE;
838 value = tcg_temp_new_i32();
839 global = false;
840 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
841 break;
842
843 case 12: /* gt: !Z && N == V */
844 case 13: /* le: Z || N != V */
845 cond = TCG_COND_NE;
846 value = tcg_temp_new_i32();
847 global = false;
848 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
849 * the sign bit then AND with ZF to yield the result. */
850 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
851 tcg_gen_sari_i32(value, value, 31);
852 tcg_gen_andc_i32(value, cpu_ZF, value);
853 break;
854
855 case 14: /* always */
856 case 15: /* always */
857 /* Use the ALWAYS condition, which will fold early.
858 * It doesn't matter what we use for the value. */
859 cond = TCG_COND_ALWAYS;
860 value = cpu_ZF;
861 goto no_invert;
862
863 default:
864 fprintf(stderr, "Bad condition code 0x%x\n", cc);
865 abort();
866 }
867
868 if (cc & 1) {
869 cond = tcg_invert_cond(cond);
870 }
871
872 no_invert:
873 cmp->cond = cond;
874 cmp->value = value;
875 cmp->value_global = global;
876 }
877
878 void arm_free_cc(DisasCompare *cmp)
879 {
880 if (!cmp->value_global) {
881 tcg_temp_free_i32(cmp->value);
882 }
883 }
884
885 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
886 {
887 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
888 }
889
890 void arm_gen_test_cc(int cc, TCGLabel *label)
891 {
892 DisasCompare cmp;
893 arm_test_cc(&cmp, cc);
894 arm_jump_cc(&cmp, label);
895 arm_free_cc(&cmp);
896 }
897
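/* Which data-processing opcodes (insn bits [24:21]) set flags from the result
 * via gen_logic_CC (logical ops) rather than from an arithmetic computation.
 */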
898 static const uint8_t table_logic_cc[16] = {
899 1, /* and */
900 1, /* xor */
901 0, /* sub */
902 0, /* rsb */
903 0, /* add */
904 0, /* adc */
905 0, /* sbc */
906 0, /* rsc */
907 1, /* andl */
908 1, /* xorl */
909 0, /* cmp */
910 0, /* cmn */
911 1, /* orr */
912 1, /* mov */
913 1, /* bic */
914 1, /* mvn */
915 };
916
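/* Write the current IT-block state back to env->condexec_bits, so that an
 * exception taken mid-block sees the correct ITSTATE.
 */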
917 static inline void gen_set_condexec(DisasContext *s)
918 {
919 if (s->condexec_mask) {
920 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
921 TCGv_i32 tmp = tcg_temp_new_i32();
922 tcg_gen_movi_i32(tmp, val);
923 store_cpu_field(tmp, condexec_bits);
924 }
925 }
926
927 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
928 {
929 tcg_gen_movi_i32(cpu_R[15], val);
930 }
931
932 /* Set PC and Thumb state from an immediate address. */
933 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
934 {
935 TCGv_i32 tmp;
936
937 s->base.is_jmp = DISAS_JUMP;
938 if (s->thumb != (addr & 1)) {
939 tmp = tcg_temp_new_i32();
940 tcg_gen_movi_i32(tmp, addr & 1);
941 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
942 tcg_temp_free_i32(tmp);
943 }
944 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
945 }
946
947 /* Set PC and Thumb state from var. var is marked as dead. */
948 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
949 {
950 s->base.is_jmp = DISAS_JUMP;
951 tcg_gen_andi_i32(cpu_R[15], var, ~1);
952 tcg_gen_andi_i32(var, var, 1);
953 store_cpu_field(var, thumb);
954 }
955
956 /* Set PC and Thumb state from var. var is marked as dead.
957 * For M-profile CPUs, include logic to detect exception-return
958 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
959 * and BX reg, and no others, and happens only for code in Handler mode.
960 */
961 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
962 {
963 /* Generate the same code here as for a simple bx, but flag via
964 * s->base.is_jmp that we need to do the rest of the work later.
965 */
966 gen_bx(s, var);
967 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
968 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
969 s->base.is_jmp = DISAS_BX_EXCRET;
970 }
971 }
972
973 static inline void gen_bx_excret_final_code(DisasContext *s)
974 {
975 /* Generate the code to finish possible exception return and end the TB */
976 TCGLabel *excret_label = gen_new_label();
977 uint32_t min_magic;
978
979 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
980 /* Covers FNC_RETURN and EXC_RETURN magic */
981 min_magic = FNC_RETURN_MIN_MAGIC;
982 } else {
983 /* EXC_RETURN magic only */
984 min_magic = EXC_RETURN_MIN_MAGIC;
985 }
986
987 /* Is the new PC value in the magic range indicating exception return? */
988 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
989 /* No: end the TB as we would for a DISAS_JMP */
990 if (is_singlestepping(s)) {
991 gen_singlestep_exception(s);
992 } else {
993 tcg_gen_exit_tb(0);
994 }
995 gen_set_label(excret_label);
996 /* Yes: this is an exception return.
997 * At this point in runtime env->regs[15] and env->thumb will hold
998 * the exception-return magic number, which do_v7m_exception_exit()
999 * will read. Nothing else will be able to see those values because
1000 * the cpu-exec main loop guarantees that we will always go straight
1001 * from raising the exception to the exception-handling code.
1002 *
1003 * gen_ss_advance(s) does nothing on M profile currently but
1004 * calling it is conceptually the right thing as we have executed
1005 * this instruction (compare SWI, HVC, SMC handling).
1006 */
1007 gen_ss_advance(s);
1008 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1009 }
1010
1011 static inline void gen_bxns(DisasContext *s, int rm)
1012 {
1013 TCGv_i32 var = load_reg(s, rm);
1014
1015 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1016 * we need to sync state before calling it, but:
1017 * - we don't need to do gen_set_pc_im() because the bxns helper will
1018 * always set the PC itself
1019 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1020 * unless it's outside an IT block or the last insn in an IT block,
1021 * so we know that condexec == 0 (already set at the top of the TB)
1022 * is correct in the non-UNPREDICTABLE cases, and we can choose
1023 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1024 */
1025 gen_helper_v7m_bxns(cpu_env, var);
1026 tcg_temp_free_i32(var);
1027 s->base.is_jmp = DISAS_EXIT;
1028 }
1029
1030 static inline void gen_blxns(DisasContext *s, int rm)
1031 {
1032 TCGv_i32 var = load_reg(s, rm);
1033
1034 /* We don't need to sync condexec state, for the same reason as bxns.
1035 * We do however need to set the PC, because the blxns helper reads it.
1036 * The blxns helper may throw an exception.
1037 */
1038 gen_set_pc_im(s, s->pc);
1039 gen_helper_v7m_blxns(cpu_env, var);
1040 tcg_temp_free_i32(var);
1041 s->base.is_jmp = DISAS_EXIT;
1042 }
1043
1044 /* Variant of store_reg which uses branch&exchange logic when storing
1045 to r15 in ARM architecture v7 and above. The source must be a temporary
1046 and will be marked as dead. */
1047 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1048 {
1049 if (reg == 15 && ENABLE_ARCH_7) {
1050 gen_bx(s, var);
1051 } else {
1052 store_reg(s, reg, var);
1053 }
1054 }
1055
1056 /* Variant of store_reg which uses branch&exchange logic when storing
1057 * to r15 in ARM architecture v5T and above. This is used for storing
1058 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1059 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1060 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1061 {
1062 if (reg == 15 && ENABLE_ARCH_5) {
1063 gen_bx_excret(s, var);
1064 } else {
1065 store_reg(s, reg, var);
1066 }
1067 }
1068
1069 #ifdef CONFIG_USER_ONLY
1070 #define IS_USER_ONLY 1
1071 #else
1072 #define IS_USER_ONLY 0
1073 #endif
1074
1075 /* Abstractions of "generate code to do a guest load/store for
1076 * AArch32", where a vaddr is always 32 bits (and is zero
1077 * extended if we're a 64 bit core) and data is also
1078 * 32 bits unless specifically doing a 64 bit access.
1079 * These functions work like tcg_gen_qemu_{ld,st}* except
1080 * that the address argument is TCGv_i32 rather than TCGv.
1081 */
1082
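/* Widen a 32-bit guest address to TCGv and apply the address XOR needed for
 * byte/halfword accesses when emulating legacy BE32 (SCTLR.B) in system mode.
 */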
1083 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1084 {
1085 TCGv addr = tcg_temp_new();
1086 tcg_gen_extu_i32_tl(addr, a32);
1087
1088 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1089 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1090 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1091 }
1092 return addr;
1093 }
1094
1095 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1096 int index, TCGMemOp opc)
1097 {
1098 TCGv addr = gen_aa32_addr(s, a32, opc);
1099 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1100 tcg_temp_free(addr);
1101 }
1102
1103 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1104 int index, TCGMemOp opc)
1105 {
1106 TCGv addr = gen_aa32_addr(s, a32, opc);
1107 tcg_gen_qemu_st_i32(val, addr, index, opc);
1108 tcg_temp_free(addr);
1109 }
1110
1111 #define DO_GEN_LD(SUFF, OPC) \
1112 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1113 TCGv_i32 a32, int index) \
1114 { \
1115 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1116 } \
1117 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1118 TCGv_i32 val, \
1119 TCGv_i32 a32, int index, \
1120 ISSInfo issinfo) \
1121 { \
1122 gen_aa32_ld##SUFF(s, val, a32, index); \
1123 disas_set_da_iss(s, OPC, issinfo); \
1124 }
1125
1126 #define DO_GEN_ST(SUFF, OPC) \
1127 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1128 TCGv_i32 a32, int index) \
1129 { \
1130 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1131 } \
1132 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1133 TCGv_i32 val, \
1134 TCGv_i32 a32, int index, \
1135 ISSInfo issinfo) \
1136 { \
1137 gen_aa32_st##SUFF(s, val, a32, index); \
1138 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1139 }
1140
1141 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1142 {
1143 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1144 if (!IS_USER_ONLY && s->sctlr_b) {
1145 tcg_gen_rotri_i64(val, val, 32);
1146 }
1147 }
1148
1149 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1150 int index, TCGMemOp opc)
1151 {
1152 TCGv addr = gen_aa32_addr(s, a32, opc);
1153 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1154 gen_aa32_frob64(s, val);
1155 tcg_temp_free(addr);
1156 }
1157
1158 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1159 TCGv_i32 a32, int index)
1160 {
1161 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1162 }
1163
1164 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1165 int index, TCGMemOp opc)
1166 {
1167 TCGv addr = gen_aa32_addr(s, a32, opc);
1168
1169 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1170 if (!IS_USER_ONLY && s->sctlr_b) {
1171 TCGv_i64 tmp = tcg_temp_new_i64();
1172 tcg_gen_rotri_i64(tmp, val, 32);
1173 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1174 tcg_temp_free_i64(tmp);
1175 } else {
1176 tcg_gen_qemu_st_i64(val, addr, index, opc);
1177 }
1178 tcg_temp_free(addr);
1179 }
1180
1181 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1182 TCGv_i32 a32, int index)
1183 {
1184 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1185 }
1186
1187 DO_GEN_LD(8s, MO_SB)
1188 DO_GEN_LD(8u, MO_UB)
1189 DO_GEN_LD(16s, MO_SW)
1190 DO_GEN_LD(16u, MO_UW)
1191 DO_GEN_LD(32u, MO_UL)
1192 DO_GEN_ST(8, MO_UB)
1193 DO_GEN_ST(16, MO_UW)
1194 DO_GEN_ST(32, MO_UL)
1195
1196 static inline void gen_hvc(DisasContext *s, int imm16)
1197 {
1198 /* The pre HVC helper handles cases when HVC gets trapped
1199 * as an undefined insn by runtime configuration (ie before
1200 * the insn really executes).
1201 */
1202 gen_set_pc_im(s, s->pc - 4);
1203 gen_helper_pre_hvc(cpu_env);
1204 /* Otherwise we will treat this as a real exception which
1205 * happens after execution of the insn. (The distinction matters
1206 * for the PC value reported to the exception handler and also
1207 * for single stepping.)
1208 */
1209 s->svc_imm = imm16;
1210 gen_set_pc_im(s, s->pc);
1211 s->base.is_jmp = DISAS_HVC;
1212 }
1213
1214 static inline void gen_smc(DisasContext *s)
1215 {
1216 /* As with HVC, we may take an exception either before or after
1217 * the insn executes.
1218 */
1219 TCGv_i32 tmp;
1220
1221 gen_set_pc_im(s, s->pc - 4);
1222 tmp = tcg_const_i32(syn_aa32_smc());
1223 gen_helper_pre_smc(cpu_env, tmp);
1224 tcg_temp_free_i32(tmp);
1225 gen_set_pc_im(s, s->pc);
1226 s->base.is_jmp = DISAS_SMC;
1227 }
1228
1229 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1230 {
1231 gen_set_condexec(s);
1232 gen_set_pc_im(s, s->pc - offset);
1233 gen_exception_internal(excp);
1234 s->base.is_jmp = DISAS_NORETURN;
1235 }
1236
1237 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1238 int syn, uint32_t target_el)
1239 {
1240 gen_set_condexec(s);
1241 gen_set_pc_im(s, s->pc - offset);
1242 gen_exception(excp, syn, target_el);
1243 s->base.is_jmp = DISAS_NORETURN;
1244 }
1245
1246 /* Force a TB lookup after an instruction that changes the CPU state. */
1247 static inline void gen_lookup_tb(DisasContext *s)
1248 {
1249 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1250 s->base.is_jmp = DISAS_EXIT;
1251 }
1252
1253 static inline void gen_hlt(DisasContext *s, int imm)
1254 {
1255 /* HLT. This has two purposes.
1256 * Architecturally, it is an external halting debug instruction.
1257      * Since QEMU doesn't implement external debug, we treat this as
1258      * the architecture requires when halting debug is disabled: it will UNDEF.
1259 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1260 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1261 * must trigger semihosting even for ARMv7 and earlier, where
1262 * HLT was an undefined encoding.
1263 * In system mode, we don't allow userspace access to
1264 * semihosting, to provide some semblance of security
1265 * (and for consistency with our 32-bit semihosting).
1266 */
1267 if (semihosting_enabled() &&
1268 #ifndef CONFIG_USER_ONLY
1269 s->current_el != 0 &&
1270 #endif
1271 (imm == (s->thumb ? 0x3c : 0xf000))) {
1272 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1273 return;
1274 }
1275
1276 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1277 default_exception_el(s));
1278 }
1279
1280 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1281 TCGv_i32 var)
1282 {
1283 int val, rm, shift, shiftop;
1284 TCGv_i32 offset;
1285
1286 if (!(insn & (1 << 25))) {
1287 /* immediate */
1288 val = insn & 0xfff;
1289 if (!(insn & (1 << 23)))
1290 val = -val;
1291 if (val != 0)
1292 tcg_gen_addi_i32(var, var, val);
1293 } else {
1294 /* shift/register */
1295 rm = (insn) & 0xf;
1296 shift = (insn >> 7) & 0x1f;
1297 shiftop = (insn >> 5) & 3;
1298 offset = load_reg(s, rm);
1299 gen_arm_shift_im(offset, shiftop, shift, 0);
1300 if (!(insn & (1 << 23)))
1301 tcg_gen_sub_i32(var, var, offset);
1302 else
1303 tcg_gen_add_i32(var, var, offset);
1304 tcg_temp_free_i32(offset);
1305 }
1306 }
1307
1308 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1309 int extra, TCGv_i32 var)
1310 {
1311 int val, rm;
1312 TCGv_i32 offset;
1313
1314 if (insn & (1 << 22)) {
1315 /* immediate */
1316 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1317 if (!(insn & (1 << 23)))
1318 val = -val;
1319 val += extra;
1320 if (val != 0)
1321 tcg_gen_addi_i32(var, var, val);
1322 } else {
1323 /* register */
1324 if (extra)
1325 tcg_gen_addi_i32(var, var, extra);
1326 rm = (insn) & 0xf;
1327 offset = load_reg(s, rm);
1328 if (!(insn & (1 << 23)))
1329 tcg_gen_sub_i32(var, var, offset);
1330 else
1331 tcg_gen_add_i32(var, var, offset);
1332 tcg_temp_free_i32(offset);
1333 }
1334 }
1335
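/* Return a pointer to the float_status to use: the Neon "standard FPSCR"
 * status when 'neon' is set, otherwise the normal VFP fp_status.
 */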
1336 static TCGv_ptr get_fpstatus_ptr(int neon)
1337 {
1338 TCGv_ptr statusptr = tcg_temp_new_ptr();
1339 int offset;
1340 if (neon) {
1341 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1342 } else {
1343 offset = offsetof(CPUARMState, vfp.fp_status);
1344 }
1345 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1346 return statusptr;
1347 }
1348
1349 #define VFP_OP2(name) \
1350 static inline void gen_vfp_##name(int dp) \
1351 { \
1352 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1353 if (dp) { \
1354 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1355 } else { \
1356 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1357 } \
1358 tcg_temp_free_ptr(fpst); \
1359 }
1360
1361 VFP_OP2(add)
1362 VFP_OP2(sub)
1363 VFP_OP2(mul)
1364 VFP_OP2(div)
1365
1366 #undef VFP_OP2
1367
1368 static inline void gen_vfp_F1_mul(int dp)
1369 {
1370 /* Like gen_vfp_mul() but put result in F1 */
1371 TCGv_ptr fpst = get_fpstatus_ptr(0);
1372 if (dp) {
1373 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1374 } else {
1375 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1376 }
1377 tcg_temp_free_ptr(fpst);
1378 }
1379
1380 static inline void gen_vfp_F1_neg(int dp)
1381 {
1382 /* Like gen_vfp_neg() but put result in F1 */
1383 if (dp) {
1384 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1385 } else {
1386 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1387 }
1388 }
1389
1390 static inline void gen_vfp_abs(int dp)
1391 {
1392 if (dp)
1393 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1394 else
1395 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1396 }
1397
1398 static inline void gen_vfp_neg(int dp)
1399 {
1400 if (dp)
1401 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1402 else
1403 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1404 }
1405
1406 static inline void gen_vfp_sqrt(int dp)
1407 {
1408 if (dp)
1409 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1410 else
1411 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1412 }
1413
1414 static inline void gen_vfp_cmp(int dp)
1415 {
1416 if (dp)
1417 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1418 else
1419 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1420 }
1421
1422 static inline void gen_vfp_cmpe(int dp)
1423 {
1424 if (dp)
1425 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1426 else
1427 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1428 }
1429
1430 static inline void gen_vfp_F1_ld0(int dp)
1431 {
1432 if (dp)
1433 tcg_gen_movi_i64(cpu_F1d, 0);
1434 else
1435 tcg_gen_movi_i32(cpu_F1s, 0);
1436 }
1437
1438 #define VFP_GEN_ITOF(name) \
1439 static inline void gen_vfp_##name(int dp, int neon) \
1440 { \
1441 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1442 if (dp) { \
1443 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1444 } else { \
1445 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1446 } \
1447 tcg_temp_free_ptr(statusptr); \
1448 }
1449
1450 VFP_GEN_ITOF(uito)
1451 VFP_GEN_ITOF(sito)
1452 #undef VFP_GEN_ITOF
1453
1454 #define VFP_GEN_FTOI(name) \
1455 static inline void gen_vfp_##name(int dp, int neon) \
1456 { \
1457 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1458 if (dp) { \
1459 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1460 } else { \
1461 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1462 } \
1463 tcg_temp_free_ptr(statusptr); \
1464 }
1465
1466 VFP_GEN_FTOI(toui)
1467 VFP_GEN_FTOI(touiz)
1468 VFP_GEN_FTOI(tosi)
1469 VFP_GEN_FTOI(tosiz)
1470 #undef VFP_GEN_FTOI
1471
1472 #define VFP_GEN_FIX(name, round) \
1473 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1474 { \
1475 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1476 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1477 if (dp) { \
1478 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1479 statusptr); \
1480 } else { \
1481 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1482 statusptr); \
1483 } \
1484 tcg_temp_free_i32(tmp_shift); \
1485 tcg_temp_free_ptr(statusptr); \
1486 }
1487 VFP_GEN_FIX(tosh, _round_to_zero)
1488 VFP_GEN_FIX(tosl, _round_to_zero)
1489 VFP_GEN_FIX(touh, _round_to_zero)
1490 VFP_GEN_FIX(toul, _round_to_zero)
1491 VFP_GEN_FIX(shto, )
1492 VFP_GEN_FIX(slto, )
1493 VFP_GEN_FIX(uhto, )
1494 VFP_GEN_FIX(ulto, )
1495 #undef VFP_GEN_FIX
1496
1497 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1498 {
1499 if (dp) {
1500 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1501 } else {
1502 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1503 }
1504 }
1505
1506 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1507 {
1508 if (dp) {
1509 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1510 } else {
1511 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1512 }
1513 }
1514
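/* Byte offset within CPUARMState of VFP register 'reg': a D register when dp,
 * otherwise an S register stored as half of the containing D register.
 */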
1515 static inline long
1516 vfp_reg_offset (int dp, int reg)
1517 {
1518 if (dp)
1519 return offsetof(CPUARMState, vfp.regs[reg]);
1520 else if (reg & 1) {
1521 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1522 + offsetof(CPU_DoubleU, l.upper);
1523 } else {
1524 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1525 + offsetof(CPU_DoubleU, l.lower);
1526 }
1527 }
1528
1529 /* Return the offset of a 32-bit piece of a NEON register.
1530 zero is the least significant end of the register. */
1531 static inline long
1532 neon_reg_offset (int reg, int n)
1533 {
1534 int sreg;
1535 sreg = reg * 2 + n;
1536 return vfp_reg_offset(0, sreg);
1537 }
1538
1539 static TCGv_i32 neon_load_reg(int reg, int pass)
1540 {
1541 TCGv_i32 tmp = tcg_temp_new_i32();
1542 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1543 return tmp;
1544 }
1545
1546 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1547 {
1548 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1549 tcg_temp_free_i32(var);
1550 }
1551
1552 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1553 {
1554 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1555 }
1556
1557 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1558 {
1559 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1560 }
1561
1562 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1563 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1564 #define tcg_gen_st_f32 tcg_gen_st_i32
1565 #define tcg_gen_st_f64 tcg_gen_st_i64
1566
1567 static inline void gen_mov_F0_vreg(int dp, int reg)
1568 {
1569 if (dp)
1570 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1571 else
1572 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1573 }
1574
1575 static inline void gen_mov_F1_vreg(int dp, int reg)
1576 {
1577 if (dp)
1578 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1579 else
1580 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1581 }
1582
1583 static inline void gen_mov_vreg_F0(int dp, int reg)
1584 {
1585 if (dp)
1586 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1587 else
1588 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1589 }
1590
1591 #define ARM_CP_RW_BIT (1 << 20)
1592
1593 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1594 {
1595 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1596 }
1597
1598 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1599 {
1600 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1601 }
1602
1603 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1604 {
1605 TCGv_i32 var = tcg_temp_new_i32();
1606 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1607 return var;
1608 }
1609
1610 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1611 {
1612 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1613 tcg_temp_free_i32(var);
1614 }
1615
1616 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1617 {
1618 iwmmxt_store_reg(cpu_M0, rn);
1619 }
1620
1621 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1622 {
1623 iwmmxt_load_reg(cpu_M0, rn);
1624 }
1625
1626 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1627 {
1628 iwmmxt_load_reg(cpu_V1, rn);
1629 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1630 }
1631
1632 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1633 {
1634 iwmmxt_load_reg(cpu_V1, rn);
1635 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1636 }
1637
1638 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1639 {
1640 iwmmxt_load_reg(cpu_V1, rn);
1641 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1642 }
1643
1644 #define IWMMXT_OP(name) \
1645 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1646 { \
1647 iwmmxt_load_reg(cpu_V1, rn); \
1648 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1649 }
1650
1651 #define IWMMXT_OP_ENV(name) \
1652 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1653 { \
1654 iwmmxt_load_reg(cpu_V1, rn); \
1655 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1656 }
1657
1658 #define IWMMXT_OP_ENV_SIZE(name) \
1659 IWMMXT_OP_ENV(name##b) \
1660 IWMMXT_OP_ENV(name##w) \
1661 IWMMXT_OP_ENV(name##l)
1662
1663 #define IWMMXT_OP_ENV1(name) \
1664 static inline void gen_op_iwmmxt_##name##_M0(void) \
1665 { \
1666 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1667 }
1668
1669 IWMMXT_OP(maddsq)
1670 IWMMXT_OP(madduq)
1671 IWMMXT_OP(sadb)
1672 IWMMXT_OP(sadw)
1673 IWMMXT_OP(mulslw)
1674 IWMMXT_OP(mulshw)
1675 IWMMXT_OP(mululw)
1676 IWMMXT_OP(muluhw)
1677 IWMMXT_OP(macsw)
1678 IWMMXT_OP(macuw)
1679
1680 IWMMXT_OP_ENV_SIZE(unpackl)
1681 IWMMXT_OP_ENV_SIZE(unpackh)
1682
1683 IWMMXT_OP_ENV1(unpacklub)
1684 IWMMXT_OP_ENV1(unpackluw)
1685 IWMMXT_OP_ENV1(unpacklul)
1686 IWMMXT_OP_ENV1(unpackhub)
1687 IWMMXT_OP_ENV1(unpackhuw)
1688 IWMMXT_OP_ENV1(unpackhul)
1689 IWMMXT_OP_ENV1(unpacklsb)
1690 IWMMXT_OP_ENV1(unpacklsw)
1691 IWMMXT_OP_ENV1(unpacklsl)
1692 IWMMXT_OP_ENV1(unpackhsb)
1693 IWMMXT_OP_ENV1(unpackhsw)
1694 IWMMXT_OP_ENV1(unpackhsl)
1695
1696 IWMMXT_OP_ENV_SIZE(cmpeq)
1697 IWMMXT_OP_ENV_SIZE(cmpgtu)
1698 IWMMXT_OP_ENV_SIZE(cmpgts)
1699
1700 IWMMXT_OP_ENV_SIZE(mins)
1701 IWMMXT_OP_ENV_SIZE(minu)
1702 IWMMXT_OP_ENV_SIZE(maxs)
1703 IWMMXT_OP_ENV_SIZE(maxu)
1704
1705 IWMMXT_OP_ENV_SIZE(subn)
1706 IWMMXT_OP_ENV_SIZE(addn)
1707 IWMMXT_OP_ENV_SIZE(subu)
1708 IWMMXT_OP_ENV_SIZE(addu)
1709 IWMMXT_OP_ENV_SIZE(subs)
1710 IWMMXT_OP_ENV_SIZE(adds)
1711
1712 IWMMXT_OP_ENV(avgb0)
1713 IWMMXT_OP_ENV(avgb1)
1714 IWMMXT_OP_ENV(avgw0)
1715 IWMMXT_OP_ENV(avgw1)
1716
1717 IWMMXT_OP_ENV(packuw)
1718 IWMMXT_OP_ENV(packul)
1719 IWMMXT_OP_ENV(packuq)
1720 IWMMXT_OP_ENV(packsw)
1721 IWMMXT_OP_ENV(packsl)
1722 IWMMXT_OP_ENV(packsq)
1723
1724 static void gen_op_iwmmxt_set_mup(void)
1725 {
1726 TCGv_i32 tmp;
1727 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1728 tcg_gen_ori_i32(tmp, tmp, 2);
1729 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1730 }
1731
1732 static void gen_op_iwmmxt_set_cup(void)
1733 {
1734 TCGv_i32 tmp;
1735 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1736 tcg_gen_ori_i32(tmp, tmp, 1);
1737 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1738 }
1739
1740 static void gen_op_iwmmxt_setpsr_nz(void)
1741 {
1742 TCGv_i32 tmp = tcg_temp_new_i32();
1743 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1744 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1745 }
1746
1747 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1748 {
1749 iwmmxt_load_reg(cpu_V1, rn);
1750 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1751 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1752 }
1753
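/* Compute the effective address for an iwMMXt load/store into 'dest',
 * performing base-register writeback for pre/post-indexed forms.
 * Returns nonzero to signal an UNDEF addressing form.
 */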
1754 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1755 TCGv_i32 dest)
1756 {
1757 int rd;
1758 uint32_t offset;
1759 TCGv_i32 tmp;
1760
1761 rd = (insn >> 16) & 0xf;
1762 tmp = load_reg(s, rd);
1763
1764 offset = (insn & 0xff) << ((insn >> 7) & 2);
1765 if (insn & (1 << 24)) {
1766 /* Pre indexed */
1767 if (insn & (1 << 23))
1768 tcg_gen_addi_i32(tmp, tmp, offset);
1769 else
1770 tcg_gen_addi_i32(tmp, tmp, -offset);
1771 tcg_gen_mov_i32(dest, tmp);
1772 if (insn & (1 << 21))
1773 store_reg(s, rd, tmp);
1774 else
1775 tcg_temp_free_i32(tmp);
1776 } else if (insn & (1 << 21)) {
1777 /* Post indexed */
1778 tcg_gen_mov_i32(dest, tmp);
1779 if (insn & (1 << 23))
1780 tcg_gen_addi_i32(tmp, tmp, offset);
1781 else
1782 tcg_gen_addi_i32(tmp, tmp, -offset);
1783 store_reg(s, rd, tmp);
1784 } else if (!(insn & (1 << 23)))
1785 return 1;
1786 return 0;
1787 }
1788
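/* Fetch the shift/count operand for an iwMMXt insn into 'dest': from a wCGR
 * control register when bit 8 is set (other control registers are invalid and
 * return nonzero), otherwise from the low 32 bits of the specified wR register.
 * The value is masked with 'mask'.
 */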
1789 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1790 {
1791 int rd = (insn >> 0) & 0xf;
1792 TCGv_i32 tmp;
1793
1794 if (insn & (1 << 8)) {
1795 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1796 return 1;
1797 } else {
1798 tmp = iwmmxt_load_creg(rd);
1799 }
1800 } else {
1801 tmp = tcg_temp_new_i32();
1802 iwmmxt_load_reg(cpu_V0, rd);
1803 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1804 }
1805 tcg_gen_andi_i32(tmp, tmp, mask);
1806 tcg_gen_mov_i32(dest, tmp);
1807 tcg_temp_free_i32(tmp);
1808 return 0;
1809 }
1810
1811 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1812 (ie. an undefined instruction). */
1813 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1814 {
1815 int rd, wrd;
1816 int rdhi, rdlo, rd0, rd1, i;
1817 TCGv_i32 addr;
1818 TCGv_i32 tmp, tmp2, tmp3;
1819
1820 if ((insn & 0x0e000e00) == 0x0c000000) {
1821 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1822 wrd = insn & 0xf;
1823 rdlo = (insn >> 12) & 0xf;
1824 rdhi = (insn >> 16) & 0xf;
1825 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1826 iwmmxt_load_reg(cpu_V0, wrd);
1827 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1828 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1829 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1830 } else { /* TMCRR */
1831 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1832 iwmmxt_store_reg(cpu_V0, wrd);
1833 gen_op_iwmmxt_set_mup();
1834 }
1835 return 0;
1836 }
1837
1838 wrd = (insn >> 12) & 0xf;
1839 addr = tcg_temp_new_i32();
1840 if (gen_iwmmxt_address(s, insn, addr)) {
1841 tcg_temp_free_i32(addr);
1842 return 1;
1843 }
1844 if (insn & ARM_CP_RW_BIT) {
1845 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1846 tmp = tcg_temp_new_i32();
1847 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1848 iwmmxt_store_creg(wrd, tmp);
1849 } else {
1850 i = 1;
1851 if (insn & (1 << 8)) {
1852 if (insn & (1 << 22)) { /* WLDRD */
1853 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1854 i = 0;
1855 } else { /* WLDRW wRd */
1856 tmp = tcg_temp_new_i32();
1857 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1858 }
1859 } else {
1860 tmp = tcg_temp_new_i32();
1861 if (insn & (1 << 22)) { /* WLDRH */
1862 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1863 } else { /* WLDRB */
1864 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1865 }
1866 }
1867 if (i) {
1868 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1869 tcg_temp_free_i32(tmp);
1870 }
1871 gen_op_iwmmxt_movq_wRn_M0(wrd);
1872 }
1873 } else {
1874 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1875 tmp = iwmmxt_load_creg(wrd);
1876 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1877 } else {
1878 gen_op_iwmmxt_movq_M0_wRn(wrd);
1879 tmp = tcg_temp_new_i32();
1880 if (insn & (1 << 8)) {
1881 if (insn & (1 << 22)) { /* WSTRD */
1882 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1883 } else { /* WSTRW wRd */
1884 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1885 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1886 }
1887 } else {
1888 if (insn & (1 << 22)) { /* WSTRH */
1889 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1890 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1891 } else { /* WSTRB */
1892 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1893 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1894 }
1895 }
1896 }
1897 tcg_temp_free_i32(tmp);
1898 }
1899 tcg_temp_free_i32(addr);
1900 return 0;
1901 }
1902
1903 if ((insn & 0x0f000000) != 0x0e000000)
1904 return 1;
1905
1906 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1907 case 0x000: /* WOR */
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 0) & 0xf;
1910 rd1 = (insn >> 16) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 gen_op_iwmmxt_orq_M0_wRn(rd1);
1913 gen_op_iwmmxt_setpsr_nz();
1914 gen_op_iwmmxt_movq_wRn_M0(wrd);
1915 gen_op_iwmmxt_set_mup();
1916 gen_op_iwmmxt_set_cup();
1917 break;
1918 case 0x011: /* TMCR */
1919 if (insn & 0xf)
1920 return 1;
1921 rd = (insn >> 12) & 0xf;
1922 wrd = (insn >> 16) & 0xf;
1923 switch (wrd) {
1924 case ARM_IWMMXT_wCID:
1925 case ARM_IWMMXT_wCASF:
1926 break;
1927 case ARM_IWMMXT_wCon:
1928 gen_op_iwmmxt_set_cup();
1929 /* Fall through. */
1930 case ARM_IWMMXT_wCSSF:
1931 tmp = iwmmxt_load_creg(wrd);
1932 tmp2 = load_reg(s, rd);
1933 tcg_gen_andc_i32(tmp, tmp, tmp2);
1934 tcg_temp_free_i32(tmp2);
1935 iwmmxt_store_creg(wrd, tmp);
1936 break;
1937 case ARM_IWMMXT_wCGR0:
1938 case ARM_IWMMXT_wCGR1:
1939 case ARM_IWMMXT_wCGR2:
1940 case ARM_IWMMXT_wCGR3:
1941 gen_op_iwmmxt_set_cup();
1942 tmp = load_reg(s, rd);
1943 iwmmxt_store_creg(wrd, tmp);
1944 break;
1945 default:
1946 return 1;
1947 }
1948 break;
1949 case 0x100: /* WXOR */
1950 wrd = (insn >> 12) & 0xf;
1951 rd0 = (insn >> 0) & 0xf;
1952 rd1 = (insn >> 16) & 0xf;
1953 gen_op_iwmmxt_movq_M0_wRn(rd0);
1954 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1955 gen_op_iwmmxt_setpsr_nz();
1956 gen_op_iwmmxt_movq_wRn_M0(wrd);
1957 gen_op_iwmmxt_set_mup();
1958 gen_op_iwmmxt_set_cup();
1959 break;
1960 case 0x111: /* TMRC */
1961 if (insn & 0xf)
1962 return 1;
1963 rd = (insn >> 12) & 0xf;
1964 wrd = (insn >> 16) & 0xf;
1965 tmp = iwmmxt_load_creg(wrd);
1966 store_reg(s, rd, tmp);
1967 break;
1968 case 0x300: /* WANDN */
1969 wrd = (insn >> 12) & 0xf;
1970 rd0 = (insn >> 0) & 0xf;
1971 rd1 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1974 gen_op_iwmmxt_andq_M0_wRn(rd1);
1975 gen_op_iwmmxt_setpsr_nz();
1976 gen_op_iwmmxt_movq_wRn_M0(wrd);
1977 gen_op_iwmmxt_set_mup();
1978 gen_op_iwmmxt_set_cup();
1979 break;
1980 case 0x200: /* WAND */
1981 wrd = (insn >> 12) & 0xf;
1982 rd0 = (insn >> 0) & 0xf;
1983 rd1 = (insn >> 16) & 0xf;
1984 gen_op_iwmmxt_movq_M0_wRn(rd0);
1985 gen_op_iwmmxt_andq_M0_wRn(rd1);
1986 gen_op_iwmmxt_setpsr_nz();
1987 gen_op_iwmmxt_movq_wRn_M0(wrd);
1988 gen_op_iwmmxt_set_mup();
1989 gen_op_iwmmxt_set_cup();
1990 break;
1991 case 0x810: case 0xa10: /* WMADD */
1992 wrd = (insn >> 12) & 0xf;
1993 rd0 = (insn >> 0) & 0xf;
1994 rd1 = (insn >> 16) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0);
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2000 gen_op_iwmmxt_movq_wRn_M0(wrd);
2001 gen_op_iwmmxt_set_mup();
2002 break;
2003 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 rd1 = (insn >> 0) & 0xf;
2007 gen_op_iwmmxt_movq_M0_wRn(rd0);
2008 switch ((insn >> 22) & 3) {
2009 case 0:
2010 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2011 break;
2012 case 1:
2013 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2014 break;
2015 case 2:
2016 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2017 break;
2018 case 3:
2019 return 1;
2020 }
2021 gen_op_iwmmxt_movq_wRn_M0(wrd);
2022 gen_op_iwmmxt_set_mup();
2023 gen_op_iwmmxt_set_cup();
2024 break;
2025 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 rd1 = (insn >> 0) & 0xf;
2029 gen_op_iwmmxt_movq_M0_wRn(rd0);
2030 switch ((insn >> 22) & 3) {
2031 case 0:
2032 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2033 break;
2034 case 1:
2035 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2036 break;
2037 case 2:
2038 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2039 break;
2040 case 3:
2041 return 1;
2042 }
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 16) & 0xf;
2050 rd1 = (insn >> 0) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 if (insn & (1 << 22))
2053 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2054 else
2055 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2056 if (!(insn & (1 << 20)))
2057 gen_op_iwmmxt_addl_M0_wRn(wrd);
2058 gen_op_iwmmxt_movq_wRn_M0(wrd);
2059 gen_op_iwmmxt_set_mup();
2060 break;
2061 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2062 wrd = (insn >> 12) & 0xf;
2063 rd0 = (insn >> 16) & 0xf;
2064 rd1 = (insn >> 0) & 0xf;
2065 gen_op_iwmmxt_movq_M0_wRn(rd0);
2066 if (insn & (1 << 21)) {
2067 if (insn & (1 << 20))
2068 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2069 else
2070 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2071 } else {
2072 if (insn & (1 << 20))
2073 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2074 else
2075 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2076 }
2077 gen_op_iwmmxt_movq_wRn_M0(wrd);
2078 gen_op_iwmmxt_set_mup();
2079 break;
2080 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2081 wrd = (insn >> 12) & 0xf;
2082 rd0 = (insn >> 16) & 0xf;
2083 rd1 = (insn >> 0) & 0xf;
2084 gen_op_iwmmxt_movq_M0_wRn(rd0);
2085 if (insn & (1 << 21))
2086 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2087 else
2088 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2089 if (!(insn & (1 << 20))) {
2090 iwmmxt_load_reg(cpu_V1, wrd);
2091 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2092 }
2093 gen_op_iwmmxt_movq_wRn_M0(wrd);
2094 gen_op_iwmmxt_set_mup();
2095 break;
2096 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 rd1 = (insn >> 0) & 0xf;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0);
2101 switch ((insn >> 22) & 3) {
2102 case 0:
2103 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2104 break;
2105 case 1:
2106 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2107 break;
2108 case 2:
2109 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 gen_op_iwmmxt_set_cup();
2117 break;
2118 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 if (insn & (1 << 22)) {
2124 if (insn & (1 << 20))
2125 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2128 } else {
2129 if (insn & (1 << 20))
2130 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2131 else
2132 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2133 }
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 gen_op_iwmmxt_set_cup();
2137 break;
2138 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 16) & 0xf;
2141 rd1 = (insn >> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
2143 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2144 tcg_gen_andi_i32(tmp, tmp, 7);
2145 iwmmxt_load_reg(cpu_V1, rd1);
2146 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2147 tcg_temp_free_i32(tmp);
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2152 if (((insn >> 6) & 3) == 3)
2153 return 1;
2154 rd = (insn >> 12) & 0xf;
2155 wrd = (insn >> 16) & 0xf;
2156 tmp = load_reg(s, rd);
2157 gen_op_iwmmxt_movq_M0_wRn(wrd);
2158 switch ((insn >> 6) & 3) {
2159 case 0:
2160 tmp2 = tcg_const_i32(0xff);
2161 tmp3 = tcg_const_i32((insn & 7) << 3);
2162 break;
2163 case 1:
2164 tmp2 = tcg_const_i32(0xffff);
2165 tmp3 = tcg_const_i32((insn & 3) << 4);
2166 break;
2167 case 2:
2168 tmp2 = tcg_const_i32(0xffffffff);
2169 tmp3 = tcg_const_i32((insn & 1) << 5);
2170 break;
2171 default:
2172 TCGV_UNUSED_I32(tmp2);
2173 TCGV_UNUSED_I32(tmp3);
2174 }
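        /* tmp2 holds the lane mask and tmp3 the bit offset of the lane
           selected by the low immediate bits; the helper inserts the core
           register value into that lane of wRd.  */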
2175 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2176 tcg_temp_free_i32(tmp3);
2177 tcg_temp_free_i32(tmp2);
2178 tcg_temp_free_i32(tmp);
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2183 rd = (insn >> 12) & 0xf;
2184 wrd = (insn >> 16) & 0xf;
2185 if (rd == 15 || ((insn >> 22) & 3) == 3)
2186 return 1;
2187 gen_op_iwmmxt_movq_M0_wRn(wrd);
2188 tmp = tcg_temp_new_i32();
2189 switch ((insn >> 22) & 3) {
2190 case 0:
2191 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2192 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2193 if (insn & 8) {
2194 tcg_gen_ext8s_i32(tmp, tmp);
2195 } else {
2196 tcg_gen_andi_i32(tmp, tmp, 0xff);
2197 }
2198 break;
2199 case 1:
2200 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2201 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2202 if (insn & 8) {
2203 tcg_gen_ext16s_i32(tmp, tmp);
2204 } else {
2205 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2206 }
2207 break;
2208 case 2:
2209 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2210 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2211 break;
2212 }
2213 store_reg(s, rd, tmp);
2214 break;
2215 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2216 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2217 return 1;
2218 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2219 switch ((insn >> 22) & 3) {
2220 case 0:
2221 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2222 break;
2223 case 1:
2224 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2225 break;
2226 case 2:
2227 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2228 break;
2229 }
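        /* The selected per-lane flag nibble is now in the low bits; move it
           up to bits [31:28] and copy it into the CPSR NZCV flags.  */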
2230 tcg_gen_shli_i32(tmp, tmp, 28);
2231 gen_set_nzcv(tmp);
2232 tcg_temp_free_i32(tmp);
2233 break;
2234 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2235 if (((insn >> 6) & 3) == 3)
2236 return 1;
2237 rd = (insn >> 12) & 0xf;
2238 wrd = (insn >> 16) & 0xf;
2239 tmp = load_reg(s, rd);
2240 switch ((insn >> 6) & 3) {
2241 case 0:
2242 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2243 break;
2244 case 1:
2245 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2246 break;
2247 case 2:
2248 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2249 break;
2250 }
2251 tcg_temp_free_i32(tmp);
2252 gen_op_iwmmxt_movq_wRn_M0(wrd);
2253 gen_op_iwmmxt_set_mup();
2254 break;
2255 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2256 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2257 return 1;
2258 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2259 tmp2 = tcg_temp_new_i32();
2260 tcg_gen_mov_i32(tmp2, tmp);
2261 switch ((insn >> 22) & 3) {
2262 case 0:
2263 for (i = 0; i < 7; i ++) {
2264 tcg_gen_shli_i32(tmp2, tmp2, 4);
2265 tcg_gen_and_i32(tmp, tmp, tmp2);
2266 }
2267 break;
2268 case 1:
2269 for (i = 0; i < 3; i ++) {
2270 tcg_gen_shli_i32(tmp2, tmp2, 8);
2271 tcg_gen_and_i32(tmp, tmp, tmp2);
2272 }
2273 break;
2274 case 2:
2275 tcg_gen_shli_i32(tmp2, tmp2, 16);
2276 tcg_gen_and_i32(tmp, tmp, tmp2);
2277 break;
2278 }
2279 gen_set_nzcv(tmp);
2280 tcg_temp_free_i32(tmp2);
2281 tcg_temp_free_i32(tmp);
2282 break;
2283 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2284 wrd = (insn >> 12) & 0xf;
2285 rd0 = (insn >> 16) & 0xf;
2286 gen_op_iwmmxt_movq_M0_wRn(rd0);
2287 switch ((insn >> 22) & 3) {
2288 case 0:
2289 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2290 break;
2291 case 1:
2292 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2293 break;
2294 case 2:
2295 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2296 break;
2297 case 3:
2298 return 1;
2299 }
2300 gen_op_iwmmxt_movq_wRn_M0(wrd);
2301 gen_op_iwmmxt_set_mup();
2302 break;
2303 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2304 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2305 return 1;
2306 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2307 tmp2 = tcg_temp_new_i32();
2308 tcg_gen_mov_i32(tmp2, tmp);
2309 switch ((insn >> 22) & 3) {
2310 case 0:
2311 for (i = 0; i < 7; i ++) {
2312 tcg_gen_shli_i32(tmp2, tmp2, 4);
2313 tcg_gen_or_i32(tmp, tmp, tmp2);
2314 }
2315 break;
2316 case 1:
2317 for (i = 0; i < 3; i ++) {
2318 tcg_gen_shli_i32(tmp2, tmp2, 8);
2319 tcg_gen_or_i32(tmp, tmp, tmp2);
2320 }
2321 break;
2322 case 2:
2323 tcg_gen_shli_i32(tmp2, tmp2, 16);
2324 tcg_gen_or_i32(tmp, tmp, tmp2);
2325 break;
2326 }
2327 gen_set_nzcv(tmp);
2328 tcg_temp_free_i32(tmp2);
2329 tcg_temp_free_i32(tmp);
2330 break;
2331 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2332 rd = (insn >> 12) & 0xf;
2333 rd0 = (insn >> 16) & 0xf;
2334 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2335 return 1;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0);
2337 tmp = tcg_temp_new_i32();
2338 switch ((insn >> 22) & 3) {
2339 case 0:
2340 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2341 break;
2342 case 1:
2343 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2344 break;
2345 case 2:
2346 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2347 break;
2348 }
2349 store_reg(s, rd, tmp);
2350 break;
2351 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2352 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2353 wrd = (insn >> 12) & 0xf;
2354 rd0 = (insn >> 16) & 0xf;
2355 rd1 = (insn >> 0) & 0xf;
2356 gen_op_iwmmxt_movq_M0_wRn(rd0);
2357 switch ((insn >> 22) & 3) {
2358 case 0:
2359 if (insn & (1 << 21))
2360 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2361 else
2362 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2363 break;
2364 case 1:
2365 if (insn & (1 << 21))
2366 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2367 else
2368 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2369 break;
2370 case 2:
2371 if (insn & (1 << 21))
2372 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2373 else
2374 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2375 break;
2376 case 3:
2377 return 1;
2378 }
2379 gen_op_iwmmxt_movq_wRn_M0(wrd);
2380 gen_op_iwmmxt_set_mup();
2381 gen_op_iwmmxt_set_cup();
2382 break;
2383 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2384 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2385 wrd = (insn >> 12) & 0xf;
2386 rd0 = (insn >> 16) & 0xf;
2387 gen_op_iwmmxt_movq_M0_wRn(rd0);
2388 switch ((insn >> 22) & 3) {
2389 case 0:
2390 if (insn & (1 << 21))
2391 gen_op_iwmmxt_unpacklsb_M0();
2392 else
2393 gen_op_iwmmxt_unpacklub_M0();
2394 break;
2395 case 1:
2396 if (insn & (1 << 21))
2397 gen_op_iwmmxt_unpacklsw_M0();
2398 else
2399 gen_op_iwmmxt_unpackluw_M0();
2400 break;
2401 case 2:
2402 if (insn & (1 << 21))
2403 gen_op_iwmmxt_unpacklsl_M0();
2404 else
2405 gen_op_iwmmxt_unpacklul_M0();
2406 break;
2407 case 3:
2408 return 1;
2409 }
2410 gen_op_iwmmxt_movq_wRn_M0(wrd);
2411 gen_op_iwmmxt_set_mup();
2412 gen_op_iwmmxt_set_cup();
2413 break;
2414 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2415 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 gen_op_iwmmxt_movq_M0_wRn(rd0);
2419 switch ((insn >> 22) & 3) {
2420 case 0:
2421 if (insn & (1 << 21))
2422 gen_op_iwmmxt_unpackhsb_M0();
2423 else
2424 gen_op_iwmmxt_unpackhub_M0();
2425 break;
2426 case 1:
2427 if (insn & (1 << 21))
2428 gen_op_iwmmxt_unpackhsw_M0();
2429 else
2430 gen_op_iwmmxt_unpackhuw_M0();
2431 break;
2432 case 2:
2433 if (insn & (1 << 21))
2434 gen_op_iwmmxt_unpackhsl_M0();
2435 else
2436 gen_op_iwmmxt_unpackhul_M0();
2437 break;
2438 case 3:
2439 return 1;
2440 }
2441 gen_op_iwmmxt_movq_wRn_M0(wrd);
2442 gen_op_iwmmxt_set_mup();
2443 gen_op_iwmmxt_set_cup();
2444 break;
2445 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2446 case 0x214: case 0x614: case 0xa14: case 0xe14:
2447 if (((insn >> 22) & 3) == 0)
2448 return 1;
2449 wrd = (insn >> 12) & 0xf;
2450 rd0 = (insn >> 16) & 0xf;
2451 gen_op_iwmmxt_movq_M0_wRn(rd0);
2452 tmp = tcg_temp_new_i32();
2453 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2454 tcg_temp_free_i32(tmp);
2455 return 1;
2456 }
2457 switch ((insn >> 22) & 3) {
2458 case 1:
2459 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2460 break;
2461 case 2:
2462 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2463 break;
2464 case 3:
2465 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2466 break;
2467 }
2468 tcg_temp_free_i32(tmp);
2469 gen_op_iwmmxt_movq_wRn_M0(wrd);
2470 gen_op_iwmmxt_set_mup();
2471 gen_op_iwmmxt_set_cup();
2472 break;
2473 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2474 case 0x014: case 0x414: case 0x814: case 0xc14:
2475 if (((insn >> 22) & 3) == 0)
2476 return 1;
2477 wrd = (insn >> 12) & 0xf;
2478 rd0 = (insn >> 16) & 0xf;
2479 gen_op_iwmmxt_movq_M0_wRn(rd0);
2480 tmp = tcg_temp_new_i32();
2481 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2482 tcg_temp_free_i32(tmp);
2483 return 1;
2484 }
2485 switch ((insn >> 22) & 3) {
2486 case 1:
2487 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2488 break;
2489 case 2:
2490 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2491 break;
2492 case 3:
2493 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2494 break;
2495 }
2496 tcg_temp_free_i32(tmp);
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2500 break;
2501 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2502 case 0x114: case 0x514: case 0x914: case 0xd14:
2503 if (((insn >> 22) & 3) == 0)
2504 return 1;
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
2508 tmp = tcg_temp_new_i32();
2509 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2510 tcg_temp_free_i32(tmp);
2511 return 1;
2512 }
2513 switch ((insn >> 22) & 3) {
2514 case 1:
2515 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2516 break;
2517 case 2:
2518 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2519 break;
2520 case 3:
2521 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2522 break;
2523 }
2524 tcg_temp_free_i32(tmp);
2525 gen_op_iwmmxt_movq_wRn_M0(wrd);
2526 gen_op_iwmmxt_set_mup();
2527 gen_op_iwmmxt_set_cup();
2528 break;
2529 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2530 case 0x314: case 0x714: case 0xb14: case 0xf14:
2531 if (((insn >> 22) & 3) == 0)
2532 return 1;
2533 wrd = (insn >> 12) & 0xf;
2534 rd0 = (insn >> 16) & 0xf;
2535 gen_op_iwmmxt_movq_M0_wRn(rd0);
2536 tmp = tcg_temp_new_i32();
2537 switch ((insn >> 22) & 3) {
2538 case 1:
2539 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2540 tcg_temp_free_i32(tmp);
2541 return 1;
2542 }
2543 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2544 break;
2545 case 2:
2546 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2547 tcg_temp_free_i32(tmp);
2548 return 1;
2549 }
2550 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2551 break;
2552 case 3:
2553 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2554 tcg_temp_free_i32(tmp);
2555 return 1;
2556 }
2557 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2558 break;
2559 }
2560 tcg_temp_free_i32(tmp);
2561 gen_op_iwmmxt_movq_wRn_M0(wrd);
2562 gen_op_iwmmxt_set_mup();
2563 gen_op_iwmmxt_set_cup();
2564 break;
2565 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2566 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2567 wrd = (insn >> 12) & 0xf;
2568 rd0 = (insn >> 16) & 0xf;
2569 rd1 = (insn >> 0) & 0xf;
2570 gen_op_iwmmxt_movq_M0_wRn(rd0);
2571 switch ((insn >> 22) & 3) {
2572 case 0:
2573 if (insn & (1 << 21))
2574 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2575 else
2576 gen_op_iwmmxt_minub_M0_wRn(rd1);
2577 break;
2578 case 1:
2579 if (insn & (1 << 21))
2580 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2581 else
2582 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2583 break;
2584 case 2:
2585 if (insn & (1 << 21))
2586 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2587 else
2588 gen_op_iwmmxt_minul_M0_wRn(rd1);
2589 break;
2590 case 3:
2591 return 1;
2592 }
2593 gen_op_iwmmxt_movq_wRn_M0(wrd);
2594 gen_op_iwmmxt_set_mup();
2595 break;
2596 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2597 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2598 wrd = (insn >> 12) & 0xf;
2599 rd0 = (insn >> 16) & 0xf;
2600 rd1 = (insn >> 0) & 0xf;
2601 gen_op_iwmmxt_movq_M0_wRn(rd0);
2602 switch ((insn >> 22) & 3) {
2603 case 0:
2604 if (insn & (1 << 21))
2605 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2606 else
2607 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2608 break;
2609 case 1:
2610 if (insn & (1 << 21))
2611 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2612 else
2613 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2614 break;
2615 case 2:
2616 if (insn & (1 << 21))
2617 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2618 else
2619 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2620 break;
2621 case 3:
2622 return 1;
2623 }
2624 gen_op_iwmmxt_movq_wRn_M0(wrd);
2625 gen_op_iwmmxt_set_mup();
2626 break;
2627 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2628 case 0x402: case 0x502: case 0x602: case 0x702:
2629 wrd = (insn >> 12) & 0xf;
2630 rd0 = (insn >> 16) & 0xf;
2631 rd1 = (insn >> 0) & 0xf;
2632 gen_op_iwmmxt_movq_M0_wRn(rd0);
2633 tmp = tcg_const_i32((insn >> 20) & 3);
2634 iwmmxt_load_reg(cpu_V1, rd1);
2635 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2636 tcg_temp_free_i32(tmp);
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 break;
2640 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2641 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2642 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2643 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2644 wrd = (insn >> 12) & 0xf;
2645 rd0 = (insn >> 16) & 0xf;
2646 rd1 = (insn >> 0) & 0xf;
2647 gen_op_iwmmxt_movq_M0_wRn(rd0);
2648 switch ((insn >> 20) & 0xf) {
2649 case 0x0:
2650 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2651 break;
2652 case 0x1:
2653 gen_op_iwmmxt_subub_M0_wRn(rd1);
2654 break;
2655 case 0x3:
2656 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2657 break;
2658 case 0x4:
2659 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2660 break;
2661 case 0x5:
2662 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2663 break;
2664 case 0x7:
2665 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2666 break;
2667 case 0x8:
2668 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2669 break;
2670 case 0x9:
2671 gen_op_iwmmxt_subul_M0_wRn(rd1);
2672 break;
2673 case 0xb:
2674 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2675 break;
2676 default:
2677 return 1;
2678 }
2679 gen_op_iwmmxt_movq_wRn_M0(wrd);
2680 gen_op_iwmmxt_set_mup();
2681 gen_op_iwmmxt_set_cup();
2682 break;
2683 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2684 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2685 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2686 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2687 wrd = (insn >> 12) & 0xf;
2688 rd0 = (insn >> 16) & 0xf;
2689 gen_op_iwmmxt_movq_M0_wRn(rd0);
2690 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2691 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2692 tcg_temp_free_i32(tmp);
2693 gen_op_iwmmxt_movq_wRn_M0(wrd);
2694 gen_op_iwmmxt_set_mup();
2695 gen_op_iwmmxt_set_cup();
2696 break;
2697 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2698 case 0x418: case 0x518: case 0x618: case 0x718:
2699 case 0x818: case 0x918: case 0xa18: case 0xb18:
2700 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2701 wrd = (insn >> 12) & 0xf;
2702 rd0 = (insn >> 16) & 0xf;
2703 rd1 = (insn >> 0) & 0xf;
2704 gen_op_iwmmxt_movq_M0_wRn(rd0);
2705 switch ((insn >> 20) & 0xf) {
2706 case 0x0:
2707 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2708 break;
2709 case 0x1:
2710 gen_op_iwmmxt_addub_M0_wRn(rd1);
2711 break;
2712 case 0x3:
2713 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2714 break;
2715 case 0x4:
2716 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2717 break;
2718 case 0x5:
2719 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2720 break;
2721 case 0x7:
2722 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2723 break;
2724 case 0x8:
2725 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2726 break;
2727 case 0x9:
2728 gen_op_iwmmxt_addul_M0_wRn(rd1);
2729 break;
2730 case 0xb:
2731 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2732 break;
2733 default:
2734 return 1;
2735 }
2736 gen_op_iwmmxt_movq_wRn_M0(wrd);
2737 gen_op_iwmmxt_set_mup();
2738 gen_op_iwmmxt_set_cup();
2739 break;
2740 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2741 case 0x408: case 0x508: case 0x608: case 0x708:
2742 case 0x808: case 0x908: case 0xa08: case 0xb08:
2743 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2744 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2745 return 1;
2746 wrd = (insn >> 12) & 0xf;
2747 rd0 = (insn >> 16) & 0xf;
2748 rd1 = (insn >> 0) & 0xf;
2749 gen_op_iwmmxt_movq_M0_wRn(rd0);
2750 switch ((insn >> 22) & 3) {
2751 case 1:
2752 if (insn & (1 << 21))
2753 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2754 else
2755 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2756 break;
2757 case 2:
2758 if (insn & (1 << 21))
2759 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2760 else
2761 gen_op_iwmmxt_packul_M0_wRn(rd1);
2762 break;
2763 case 3:
2764 if (insn & (1 << 21))
2765 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2766 else
2767 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2768 break;
2769 }
2770 gen_op_iwmmxt_movq_wRn_M0(wrd);
2771 gen_op_iwmmxt_set_mup();
2772 gen_op_iwmmxt_set_cup();
2773 break;
2774 case 0x201: case 0x203: case 0x205: case 0x207:
2775 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2776 case 0x211: case 0x213: case 0x215: case 0x217:
2777 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2778 wrd = (insn >> 5) & 0xf;
2779 rd0 = (insn >> 12) & 0xf;
2780 rd1 = (insn >> 0) & 0xf;
2781 if (rd0 == 0xf || rd1 == 0xf)
2782 return 1;
2783 gen_op_iwmmxt_movq_M0_wRn(wrd);
2784 tmp = load_reg(s, rd0);
2785 tmp2 = load_reg(s, rd1);
2786 switch ((insn >> 16) & 0xf) {
2787 case 0x0: /* TMIA */
2788 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2789 break;
2790 case 0x8: /* TMIAPH */
2791 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2792 break;
2793 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2794 if (insn & (1 << 16))
2795 tcg_gen_shri_i32(tmp, tmp, 16);
2796 if (insn & (1 << 17))
2797 tcg_gen_shri_i32(tmp2, tmp2, 16);
2798 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2799 break;
2800 default:
2801 tcg_temp_free_i32(tmp2);
2802 tcg_temp_free_i32(tmp);
2803 return 1;
2804 }
2805 tcg_temp_free_i32(tmp2);
2806 tcg_temp_free_i32(tmp);
2807 gen_op_iwmmxt_movq_wRn_M0(wrd);
2808 gen_op_iwmmxt_set_mup();
2809 break;
2810 default:
2811 return 1;
2812 }
2813
2814 return 0;
2815 }
2816
2817 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2818    (i.e. an undefined instruction).  */
2819 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2820 {
2821 int acc, rd0, rd1, rdhi, rdlo;
2822 TCGv_i32 tmp, tmp2;
2823
2824 if ((insn & 0x0ff00f10) == 0x0e200010) {
2825 /* Multiply with Internal Accumulate Format */
2826 rd0 = (insn >> 12) & 0xf;
2827 rd1 = insn & 0xf;
2828 acc = (insn >> 5) & 7;
2829
2830 if (acc != 0)
2831 return 1;
2832
2833 tmp = load_reg(s, rd0);
2834 tmp2 = load_reg(s, rd1);
2835 switch ((insn >> 16) & 0xf) {
2836 case 0x0: /* MIA */
2837 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2838 break;
2839 case 0x8: /* MIAPH */
2840 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2841 break;
2842 case 0xc: /* MIABB */
2843 case 0xd: /* MIABT */
2844 case 0xe: /* MIATB */
2845 case 0xf: /* MIATT */
2846 if (insn & (1 << 16))
2847 tcg_gen_shri_i32(tmp, tmp, 16);
2848 if (insn & (1 << 17))
2849 tcg_gen_shri_i32(tmp2, tmp2, 16);
2850 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2851 break;
2852 default:
2853 return 1;
2854 }
2855 tcg_temp_free_i32(tmp2);
2856 tcg_temp_free_i32(tmp);
2857
2858 gen_op_iwmmxt_movq_wRn_M0(acc);
2859 return 0;
2860 }
2861
2862 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2863 /* Internal Accumulator Access Format */
2864 rdhi = (insn >> 16) & 0xf;
2865 rdlo = (insn >> 12) & 0xf;
2866 acc = insn & 7;
2867
2868 if (acc != 0)
2869 return 1;
2870
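        /* MRA copies the 40-bit accumulator into RdLo:RdHi, masking RdHi so
           that only bits [39:32] survive; MAR writes RdLo:RdHi back through
           the 64-bit iWMMXt register store.  */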
2871 if (insn & ARM_CP_RW_BIT) { /* MRA */
2872 iwmmxt_load_reg(cpu_V0, acc);
2873 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2874 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2875 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2876 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2877 } else { /* MAR */
2878 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2879 iwmmxt_store_reg(cpu_V0, acc);
2880 }
2881 return 0;
2882 }
2883
2884 return 1;
2885 }
2886
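/* VFP register numbers are split across the instruction encoding as a 4-bit
   field plus one extra bit.  For single-precision registers the extra bit is
   the least significant bit of the register number, for double-precision
   registers (when VFP3 is present) it is the most significant bit; without
   VFP3 the extra bit must be zero.  For example, a field value of 0b1010 with
   the extra bit set decodes to S21 via VFP_SREG and to D26 via VFP_DREG.  */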
2887 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2888 #define VFP_SREG(insn, bigbit, smallbit) \
2889 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2890 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2891 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2892 reg = (((insn) >> (bigbit)) & 0x0f) \
2893 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2894 } else { \
2895 if (insn & (1 << (smallbit))) \
2896 return 1; \
2897 reg = ((insn) >> (bigbit)) & 0x0f; \
2898 }} while (0)
2899
2900 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2901 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2902 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2903 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2904 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2905 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2906
2907 /* Move between integer and VFP cores. */
2908 static TCGv_i32 gen_vfp_mrs(void)
2909 {
2910 TCGv_i32 tmp = tcg_temp_new_i32();
2911 tcg_gen_mov_i32(tmp, cpu_F0s);
2912 return tmp;
2913 }
2914
2915 static void gen_vfp_msr(TCGv_i32 tmp)
2916 {
2917 tcg_gen_mov_i32(cpu_F0s, tmp);
2918 tcg_temp_free_i32(tmp);
2919 }
2920
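/* The gen_neon_dup_* helpers replicate a single element across all lanes of a
   32-bit value: gen_neon_dup_u8() isolates the requested byte and ORs in
   shifted copies until all four byte lanes match, and the low16/high16
   variants do the same for the chosen halfword.  */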
2921 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2922 {
2923 TCGv_i32 tmp = tcg_temp_new_i32();
2924 if (shift)
2925 tcg_gen_shri_i32(var, var, shift);
2926 tcg_gen_ext8u_i32(var, var);
2927 tcg_gen_shli_i32(tmp, var, 8);
2928 tcg_gen_or_i32(var, var, tmp);
2929 tcg_gen_shli_i32(tmp, var, 16);
2930 tcg_gen_or_i32(var, var, tmp);
2931 tcg_temp_free_i32(tmp);
2932 }
2933
2934 static void gen_neon_dup_low16(TCGv_i32 var)
2935 {
2936 TCGv_i32 tmp = tcg_temp_new_i32();
2937 tcg_gen_ext16u_i32(var, var);
2938 tcg_gen_shli_i32(tmp, var, 16);
2939 tcg_gen_or_i32(var, var, tmp);
2940 tcg_temp_free_i32(tmp);
2941 }
2942
2943 static void gen_neon_dup_high16(TCGv_i32 var)
2944 {
2945 TCGv_i32 tmp = tcg_temp_new_i32();
2946 tcg_gen_andi_i32(var, var, 0xffff0000);
2947 tcg_gen_shri_i32(tmp, var, 16);
2948 tcg_gen_or_i32(var, var, tmp);
2949 tcg_temp_free_i32(tmp);
2950 }
2951
2952 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2953 {
2954     /* Load a single Neon element and replicate into a 32-bit TCG reg */
2955 TCGv_i32 tmp = tcg_temp_new_i32();
2956 switch (size) {
2957 case 0:
2958 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2959 gen_neon_dup_u8(tmp, 0);
2960 break;
2961 case 1:
2962 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2963 gen_neon_dup_low16(tmp);
2964 break;
2965 case 2:
2966 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2967 break;
2968 default: /* Avoid compiler warnings. */
2969 abort();
2970 }
2971 return tmp;
2972 }
2973
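/* VSEL selects between the Vn and Vm operands according to the condition
   encoded in insn bits [21:20]: 0 -> EQ, 1 -> VS, 2 -> GE, 3 -> GT.  The
   selection is done with movcond on the cached ZF/NF/VF flag values (widened
   to 64 bits for the double-precision case), so no branches are generated.  */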
2974 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2975 uint32_t dp)
2976 {
2977 uint32_t cc = extract32(insn, 20, 2);
2978
2979 if (dp) {
2980 TCGv_i64 frn, frm, dest;
2981 TCGv_i64 tmp, zero, zf, nf, vf;
2982
2983 zero = tcg_const_i64(0);
2984
2985 frn = tcg_temp_new_i64();
2986 frm = tcg_temp_new_i64();
2987 dest = tcg_temp_new_i64();
2988
2989 zf = tcg_temp_new_i64();
2990 nf = tcg_temp_new_i64();
2991 vf = tcg_temp_new_i64();
2992
2993 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2994 tcg_gen_ext_i32_i64(nf, cpu_NF);
2995 tcg_gen_ext_i32_i64(vf, cpu_VF);
2996
2997 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2998 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2999 switch (cc) {
3000 case 0: /* eq: Z */
3001 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3002 frn, frm);
3003 break;
3004 case 1: /* vs: V */
3005 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3006 frn, frm);
3007 break;
3008 case 2: /* ge: N == V -> N ^ V == 0 */
3009 tmp = tcg_temp_new_i64();
3010 tcg_gen_xor_i64(tmp, vf, nf);
3011 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3012 frn, frm);
3013 tcg_temp_free_i64(tmp);
3014 break;
3015 case 3: /* gt: !Z && N == V */
3016 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3017 frn, frm);
3018 tmp = tcg_temp_new_i64();
3019 tcg_gen_xor_i64(tmp, vf, nf);
3020 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3021 dest, frm);
3022 tcg_temp_free_i64(tmp);
3023 break;
3024 }
3025 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3026 tcg_temp_free_i64(frn);
3027 tcg_temp_free_i64(frm);
3028 tcg_temp_free_i64(dest);
3029
3030 tcg_temp_free_i64(zf);
3031 tcg_temp_free_i64(nf);
3032 tcg_temp_free_i64(vf);
3033
3034 tcg_temp_free_i64(zero);
3035 } else {
3036 TCGv_i32 frn, frm, dest;
3037 TCGv_i32 tmp, zero;
3038
3039 zero = tcg_const_i32(0);
3040
3041 frn = tcg_temp_new_i32();
3042 frm = tcg_temp_new_i32();
3043 dest = tcg_temp_new_i32();
3044 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3045 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3046 switch (cc) {
3047 case 0: /* eq: Z */
3048 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3049 frn, frm);
3050 break;
3051 case 1: /* vs: V */
3052 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3053 frn, frm);
3054 break;
3055 case 2: /* ge: N == V -> N ^ V == 0 */
3056 tmp = tcg_temp_new_i32();
3057 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3058 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3059 frn, frm);
3060 tcg_temp_free_i32(tmp);
3061 break;
3062 case 3: /* gt: !Z && N == V */
3063 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3064 frn, frm);
3065 tmp = tcg_temp_new_i32();
3066 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3067 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3068 dest, frm);
3069 tcg_temp_free_i32(tmp);
3070 break;
3071 }
3072 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3073 tcg_temp_free_i32(frn);
3074 tcg_temp_free_i32(frm);
3075 tcg_temp_free_i32(dest);
3076
3077 tcg_temp_free_i32(zero);
3078 }
3079
3080 return 0;
3081 }
3082
3083 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3084 uint32_t rm, uint32_t dp)
3085 {
3086 uint32_t vmin = extract32(insn, 6, 1);
3087 TCGv_ptr fpst =