target/arm: Stop using cpu_F0s for NEON_2RM_VNEG_F
target/arm/translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "qemu/qemu-print.h"
32 #include "arm_ldst.h"
33 #include "hw/semihosting/semihost.h"
34
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
37
38 #include "trace-tcg.h"
39 #include "exec/log.h"
40
41
42 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
43 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
44 /* currently all emulated v5 cores are also v5TE, so don't bother */
45 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
46 #define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
47 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
48 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
49 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
50 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
51 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52
53 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54
55 #include "translate.h"
56
57 #if defined(CONFIG_USER_ONLY)
58 #define IS_USER(s) 1
59 #else
60 #define IS_USER(s) (s->user)
61 #endif
62
63 /* We reuse the same 64-bit temporaries for efficiency. */
64 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
65 static TCGv_i32 cpu_R[16];
66 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
67 TCGv_i64 cpu_exclusive_addr;
68 TCGv_i64 cpu_exclusive_val;
69
70 /* FIXME: These should be removed. */
71 static TCGv_i32 cpu_F0s, cpu_F1s;
72 static TCGv_i64 cpu_F0d, cpu_F1d;
73
74 #include "exec/gen-icount.h"
75
76 static const char * const regnames[] =
77 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
78 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79
80 /* Function prototypes for gen_ functions calling Neon helpers. */
81 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
82 TCGv_i32, TCGv_i32);
83
84 /* initialize TCG globals. */
85 void arm_translate_init(void)
86 {
87 int i;
88
89 for (i = 0; i < 16; i++) {
90 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
91 offsetof(CPUARMState, regs[i]),
92 regnames[i]);
93 }
94 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
95 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
96 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
97 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98
99 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
100 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
101 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
102 offsetof(CPUARMState, exclusive_val), "exclusive_val");
103
104 a64_translate_init();
105 }
106
107 /* Flags for the disas_set_da_iss info argument:
108 * lower bits hold the Rt register number, higher bits are flags.
109 */
110 typedef enum ISSInfo {
111 ISSNone = 0,
112 ISSRegMask = 0x1f,
113 ISSInvalid = (1 << 5),
114 ISSIsAcqRel = (1 << 6),
115 ISSIsWrite = (1 << 7),
116 ISSIs16Bit = (1 << 8),
117 } ISSInfo;
118
119 /* Save the syndrome information for a Data Abort */
120 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
121 {
122 uint32_t syn;
123 int sas = memop & MO_SIZE;
124 bool sse = memop & MO_SIGN;
125 bool is_acqrel = issinfo & ISSIsAcqRel;
126 bool is_write = issinfo & ISSIsWrite;
127 bool is_16bit = issinfo & ISSIs16Bit;
128 int srt = issinfo & ISSRegMask;
129
130 if (issinfo & ISSInvalid) {
131 /* Some callsites want to conditionally provide ISS info,
132 * eg "only if this was not a writeback"
133 */
134 return;
135 }
136
137 if (srt == 15) {
138 /* For AArch32, insns where the src/dest is R15 never generate
139 * ISS information. Catching that here saves checking at all
140 * the call sites.
141 */
142 return;
143 }
144
145 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
146 0, 0, 0, is_write, 0, is_16bit);
147 disas_set_insn_syndrome(s, syn);
148 }
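/*
 * Illustrative note (annotation, not part of the original source): for a
 * 16-bit Thumb "LDR r3, [r0]" the caller would pass issinfo = 3 | ISSIs16Bit
 * and a word-sized memop, so the syndrome is built with sas = MO_32,
 * sse = 0, srt = 3, is_write = false and is_16bit = true. A load whose
 * destination is r15 takes the srt == 15 early return above and records
 * no ISS information at all.
 */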
149
150 static inline int get_a32_user_mem_index(DisasContext *s)
151 {
152 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
153 * insns:
154 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
155 * otherwise, access as if at PL0.
156 */
157 switch (s->mmu_idx) {
158 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
159 case ARMMMUIdx_S12NSE0:
160 case ARMMMUIdx_S12NSE1:
161 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
162 case ARMMMUIdx_S1E3:
163 case ARMMMUIdx_S1SE0:
164 case ARMMMUIdx_S1SE1:
165 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
166 case ARMMMUIdx_MUser:
167 case ARMMMUIdx_MPriv:
168 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
169 case ARMMMUIdx_MUserNegPri:
170 case ARMMMUIdx_MPrivNegPri:
171 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
172 case ARMMMUIdx_MSUser:
173 case ARMMMUIdx_MSPriv:
174 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
175 case ARMMMUIdx_MSUserNegPri:
176 case ARMMMUIdx_MSPrivNegPri:
177 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
178 case ARMMMUIdx_S2NS:
179 default:
180 g_assert_not_reached();
181 }
182 }
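/*
 * Illustrative note (annotation, not part of the original source): an LDRT
 * or STRT executed at Secure EL1 runs with s->mmu_idx == ARMMMUIdx_S1SE1,
 * and the switch above maps that to the core index for ARMMMUIdx_S1SE0,
 * so the "unprivileged" access is looked up using the EL0 translation
 * regime as intended.
 */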
183
184 static inline TCGv_i32 load_cpu_offset(int offset)
185 {
186 TCGv_i32 tmp = tcg_temp_new_i32();
187 tcg_gen_ld_i32(tmp, cpu_env, offset);
188 return tmp;
189 }
190
191 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
192
193 static inline void store_cpu_offset(TCGv_i32 var, int offset)
194 {
195 tcg_gen_st_i32(var, cpu_env, offset);
196 tcg_temp_free_i32(var);
197 }
198
199 #define store_cpu_field(var, name) \
200 store_cpu_offset(var, offsetof(CPUARMState, name))
201
202 /* Set a variable to the value of a CPU register. */
203 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
204 {
205 if (reg == 15) {
206 uint32_t addr;
207             /* normally, since we have already updated PC past this insn,
                   we need only add one more insn's length */
208 if (s->thumb)
209 addr = (long)s->pc + 2;
210 else
211 addr = (long)s->pc + 4;
212 tcg_gen_movi_i32(var, addr);
213 } else {
214 tcg_gen_mov_i32(var, cpu_R[reg]);
215 }
216 }
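/*
 * Illustrative note (annotation, not part of the original source): s->pc has
 * already been advanced past the current insn, so the "+4"/"+2" above yields
 * the architectural PC value: reading r15 from an ARM insn at address A
 * returns A + 8, and from a Thumb insn returns A + 4.
 */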
217
218 /* Create a new temporary and set it to the value of a CPU register. */
219 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
220 {
221 TCGv_i32 tmp = tcg_temp_new_i32();
222 load_reg_var(s, tmp, reg);
223 return tmp;
224 }
225
226 /* Set a CPU register. The source must be a temporary and will be
227 marked as dead. */
228 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
229 {
230 if (reg == 15) {
231 /* In Thumb mode, we must ignore bit 0.
232 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
233 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
234 * We choose to ignore [1:0] in ARM mode for all architecture versions.
235 */
236 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
237 s->base.is_jmp = DISAS_JUMP;
238 }
239 tcg_gen_mov_i32(cpu_R[reg], var);
240 tcg_temp_free_i32(var);
241 }
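/*
 * Illustrative note (annotation, not part of the original source): writing
 * 0x00001003 to r15 stores 0x00001002 in Thumb mode (bit 0 ignored) and
 * 0x00001000 in ARM mode (bits [1:0] ignored), matching the masking choice
 * described in the comment above.
 */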
242
243 /*
244 * Variant of store_reg which applies v8M stack-limit checks before updating
245 * SP. If the check fails this will result in an exception being taken.
246 * We disable the stack checks for CONFIG_USER_ONLY because we have
247 * no idea what the stack limits should be in that case.
248 * If stack checking is not being done this just acts like store_reg().
249 */
250 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
251 {
252 #ifndef CONFIG_USER_ONLY
253 if (s->v8m_stackcheck) {
254 gen_helper_v8m_stackcheck(cpu_env, var);
255 }
256 #endif
257 store_reg(s, 13, var);
258 }
259
260 /* Value extensions. */
261 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
262 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
263 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
264 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
265
266 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
267 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
268
269
270 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
271 {
272 TCGv_i32 tmp_mask = tcg_const_i32(mask);
273 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
274 tcg_temp_free_i32(tmp_mask);
275 }
276 /* Set NZCV flags from the high 4 bits of var. */
277 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
278
279 static void gen_exception_internal(int excp)
280 {
281 TCGv_i32 tcg_excp = tcg_const_i32(excp);
282
283 assert(excp_is_internal(excp));
284 gen_helper_exception_internal(cpu_env, tcg_excp);
285 tcg_temp_free_i32(tcg_excp);
286 }
287
288 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
289 {
290 TCGv_i32 tcg_excp = tcg_const_i32(excp);
291 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
292 TCGv_i32 tcg_el = tcg_const_i32(target_el);
293
294 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
295 tcg_syn, tcg_el);
296
297 tcg_temp_free_i32(tcg_el);
298 tcg_temp_free_i32(tcg_syn);
299 tcg_temp_free_i32(tcg_excp);
300 }
301
302 static void gen_step_complete_exception(DisasContext *s)
303 {
304     /* We have just completed a step of an insn. Move from Active-not-pending
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
312 */
313 gen_ss_advance(s);
314 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
315 default_exception_el(s));
316 s->base.is_jmp = DISAS_NORETURN;
317 }
318
319 static void gen_singlestep_exception(DisasContext *s)
320 {
321 /* Generate the right kind of exception for singlestep, which is
322 * either the architectural singlestep or EXCP_DEBUG for QEMU's
323 * gdb singlestepping.
324 */
325 if (s->ss_active) {
326 gen_step_complete_exception(s);
327 } else {
328 gen_exception_internal(EXCP_DEBUG);
329 }
330 }
331
332 static inline bool is_singlestepping(DisasContext *s)
333 {
334 /* Return true if we are singlestepping either because of
335 * architectural singlestep or QEMU gdbstub singlestep. This does
336 * not include the command line '-singlestep' mode which is rather
337 * misnamed as it only means "one instruction per TB" and doesn't
338 * affect the code we generate.
339 */
340 return s->base.singlestep_enabled || s->ss_active;
341 }
342
343 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
344 {
345 TCGv_i32 tmp1 = tcg_temp_new_i32();
346 TCGv_i32 tmp2 = tcg_temp_new_i32();
347 tcg_gen_ext16s_i32(tmp1, a);
348 tcg_gen_ext16s_i32(tmp2, b);
349 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
350 tcg_temp_free_i32(tmp2);
351 tcg_gen_sari_i32(a, a, 16);
352 tcg_gen_sari_i32(b, b, 16);
353 tcg_gen_mul_i32(b, b, a);
354 tcg_gen_mov_i32(a, tmp1);
355 tcg_temp_free_i32(tmp1);
356 }
357
358 /* Byteswap each halfword. */
359 static void gen_rev16(TCGv_i32 var)
360 {
361 TCGv_i32 tmp = tcg_temp_new_i32();
362 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
363 tcg_gen_shri_i32(tmp, var, 8);
364 tcg_gen_and_i32(tmp, tmp, mask);
365 tcg_gen_and_i32(var, var, mask);
366 tcg_gen_shli_i32(var, var, 8);
367 tcg_gen_or_i32(var, var, tmp);
368 tcg_temp_free_i32(mask);
369 tcg_temp_free_i32(tmp);
370 }
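/*
 * Illustrative note (annotation, not part of the original source), worked
 * example for var = 0xAABBCCDD:
 *   tmp = (var >> 8) & 0x00ff00ff   -> 0x00AA00CC
 *   var = (var & 0x00ff00ff) << 8   -> 0xBB00DD00
 *   var |= tmp                      -> 0xBBAADDCC
 * i.e. each 16-bit halfword is byteswapped independently.
 */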
371
372 /* Byteswap low halfword and sign extend. */
373 static void gen_revsh(TCGv_i32 var)
374 {
375 tcg_gen_ext16u_i32(var, var);
376 tcg_gen_bswap16_i32(var, var);
377 tcg_gen_ext16s_i32(var, var);
378 }
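/*
 * Illustrative note (annotation, not part of the original source): for
 * var = 0x1234CC80 the sequence gives 0x0000CC80 -> 0x000080CC -> 0xFFFF80CC,
 * i.e. the low halfword is byteswapped and then sign-extended to 32 bits.
 */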
379
380 /* Return (b << 32) + a. Mark inputs as dead */
381 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
382 {
383 TCGv_i64 tmp64 = tcg_temp_new_i64();
384
385 tcg_gen_extu_i32_i64(tmp64, b);
386 tcg_temp_free_i32(b);
387 tcg_gen_shli_i64(tmp64, tmp64, 32);
388 tcg_gen_add_i64(a, tmp64, a);
389
390 tcg_temp_free_i64(tmp64);
391 return a;
392 }
393
394 /* Return (b << 32) - a. Mark inputs as dead. */
395 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
396 {
397 TCGv_i64 tmp64 = tcg_temp_new_i64();
398
399 tcg_gen_extu_i32_i64(tmp64, b);
400 tcg_temp_free_i32(b);
401 tcg_gen_shli_i64(tmp64, tmp64, 32);
402 tcg_gen_sub_i64(a, tmp64, a);
403
404 tcg_temp_free_i64(tmp64);
405 return a;
406 }
407
408 /* 32x32->64 multiply. Marks inputs as dead. */
409 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
410 {
411 TCGv_i32 lo = tcg_temp_new_i32();
412 TCGv_i32 hi = tcg_temp_new_i32();
413 TCGv_i64 ret;
414
415 tcg_gen_mulu2_i32(lo, hi, a, b);
416 tcg_temp_free_i32(a);
417 tcg_temp_free_i32(b);
418
419 ret = tcg_temp_new_i64();
420 tcg_gen_concat_i32_i64(ret, lo, hi);
421 tcg_temp_free_i32(lo);
422 tcg_temp_free_i32(hi);
423
424 return ret;
425 }
426
427 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
428 {
429 TCGv_i32 lo = tcg_temp_new_i32();
430 TCGv_i32 hi = tcg_temp_new_i32();
431 TCGv_i64 ret;
432
433 tcg_gen_muls2_i32(lo, hi, a, b);
434 tcg_temp_free_i32(a);
435 tcg_temp_free_i32(b);
436
437 ret = tcg_temp_new_i64();
438 tcg_gen_concat_i32_i64(ret, lo, hi);
439 tcg_temp_free_i32(lo);
440 tcg_temp_free_i32(hi);
441
442 return ret;
443 }
444
445 /* Swap low and high halfwords. */
446 static void gen_swap_half(TCGv_i32 var)
447 {
448 TCGv_i32 tmp = tcg_temp_new_i32();
449 tcg_gen_shri_i32(tmp, var, 16);
450 tcg_gen_shli_i32(var, var, 16);
451 tcg_gen_or_i32(var, var, tmp);
452 tcg_temp_free_i32(tmp);
453 }
454
455 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
456 tmp = (t0 ^ t1) & 0x8000;
457 t0 &= ~0x8000;
458 t1 &= ~0x8000;
459 t0 = (t0 + t1) ^ tmp;
460 */
461
462 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
463 {
464 TCGv_i32 tmp = tcg_temp_new_i32();
465 tcg_gen_xor_i32(tmp, t0, t1);
466 tcg_gen_andi_i32(tmp, tmp, 0x8000);
467 tcg_gen_andi_i32(t0, t0, ~0x8000);
468 tcg_gen_andi_i32(t1, t1, ~0x8000);
469 tcg_gen_add_i32(t0, t0, t1);
470 tcg_gen_xor_i32(t0, t0, tmp);
471 tcg_temp_free_i32(tmp);
472 tcg_temp_free_i32(t1);
473 }
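/*
 * Illustrative note (annotation, not part of the original source): clearing
 * bit 15 of both addends stops any carry from the low halfword leaking into
 * the high halfword; the true bit 15 is restored by the final XOR.
 * For example t0 = 0x00028000, t1 = 0x00010001:
 *   tmp = (t0 ^ t1) & 0x8000             = 0x8000
 *   (t0 & ~0x8000) + (t1 & ~0x8000)      = 0x00030001
 *   result ^ tmp                         = 0x00038001
 * giving lanes 0x0003 and 0x8001, as expected.
 */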
474
475 /* Set CF to the top bit of var. */
476 static void gen_set_CF_bit31(TCGv_i32 var)
477 {
478 tcg_gen_shri_i32(cpu_CF, var, 31);
479 }
480
481 /* Set N and Z flags from var. */
482 static inline void gen_logic_CC(TCGv_i32 var)
483 {
484 tcg_gen_mov_i32(cpu_NF, var);
485 tcg_gen_mov_i32(cpu_ZF, var);
486 }
487
488 /* T0 += T1 + CF. */
489 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
490 {
491 tcg_gen_add_i32(t0, t0, t1);
492 tcg_gen_add_i32(t0, t0, cpu_CF);
493 }
494
495 /* dest = T0 + T1 + CF. */
496 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
497 {
498 tcg_gen_add_i32(dest, t0, t1);
499 tcg_gen_add_i32(dest, dest, cpu_CF);
500 }
501
502 /* dest = T0 - T1 + CF - 1. */
503 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
504 {
505 tcg_gen_sub_i32(dest, t0, t1);
506 tcg_gen_add_i32(dest, dest, cpu_CF);
507 tcg_gen_subi_i32(dest, dest, 1);
508 }
509
510 /* dest = T0 + T1. Compute C, N, V and Z flags */
511 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
512 {
513 TCGv_i32 tmp = tcg_temp_new_i32();
514 tcg_gen_movi_i32(tmp, 0);
515 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
516 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
517 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
518 tcg_gen_xor_i32(tmp, t0, t1);
519 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
520 tcg_temp_free_i32(tmp);
521 tcg_gen_mov_i32(dest, cpu_NF);
522 }
523
524 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
525 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
526 {
527 TCGv_i32 tmp = tcg_temp_new_i32();
528 if (TCG_TARGET_HAS_add2_i32) {
529 tcg_gen_movi_i32(tmp, 0);
530 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
531 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
532 } else {
533 TCGv_i64 q0 = tcg_temp_new_i64();
534 TCGv_i64 q1 = tcg_temp_new_i64();
535 tcg_gen_extu_i32_i64(q0, t0);
536 tcg_gen_extu_i32_i64(q1, t1);
537 tcg_gen_add_i64(q0, q0, q1);
538 tcg_gen_extu_i32_i64(q1, cpu_CF);
539 tcg_gen_add_i64(q0, q0, q1);
540 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
541 tcg_temp_free_i64(q0);
542 tcg_temp_free_i64(q1);
543 }
544 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
545 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
546 tcg_gen_xor_i32(tmp, t0, t1);
547 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
548 tcg_temp_free_i32(tmp);
549 tcg_gen_mov_i32(dest, cpu_NF);
550 }
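/*
 * Illustrative note (annotation, not part of the original source): the V
 * flag computed above is VF = (result ^ t0) & ~(t0 ^ t1), i.e. signed
 * overflow iff the operands have the same sign but the result's sign
 * differs. For example 0x7fffffff + 1 with CF = 0 gives NF = 0x80000000
 * and a VF with bit 31 set, so the "vs" condition is true.
 */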
551
552 /* dest = T0 - T1. Compute C, N, V and Z flags */
553 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
554 {
555 TCGv_i32 tmp;
556 tcg_gen_sub_i32(cpu_NF, t0, t1);
557 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
558 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
559 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
560 tmp = tcg_temp_new_i32();
561 tcg_gen_xor_i32(tmp, t0, t1);
562 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
563 tcg_temp_free_i32(tmp);
564 tcg_gen_mov_i32(dest, cpu_NF);
565 }
566
567 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
568 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
569 {
570 TCGv_i32 tmp = tcg_temp_new_i32();
571 tcg_gen_not_i32(tmp, t1);
572 gen_adc_CC(dest, t0, tmp);
573 tcg_temp_free_i32(tmp);
574 }
575
576 #define GEN_SHIFT(name) \
577 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
578 { \
579 TCGv_i32 tmp1, tmp2, tmp3; \
580 tmp1 = tcg_temp_new_i32(); \
581 tcg_gen_andi_i32(tmp1, t1, 0xff); \
582 tmp2 = tcg_const_i32(0); \
583 tmp3 = tcg_const_i32(0x1f); \
584 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
585 tcg_temp_free_i32(tmp3); \
586 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
587 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
588 tcg_temp_free_i32(tmp2); \
589 tcg_temp_free_i32(tmp1); \
590 }
591 GEN_SHIFT(shl)
592 GEN_SHIFT(shr)
593 #undef GEN_SHIFT
594
595 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
596 {
597 TCGv_i32 tmp1, tmp2;
598 tmp1 = tcg_temp_new_i32();
599 tcg_gen_andi_i32(tmp1, t1, 0xff);
600 tmp2 = tcg_const_i32(0x1f);
601 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
602 tcg_temp_free_i32(tmp2);
603 tcg_gen_sar_i32(dest, t0, tmp1);
604 tcg_temp_free_i32(tmp1);
605 }
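/*
 * Illustrative note (annotation, not part of the original source): register
 * shifts use only the bottom byte of the shift register. For LSL/LSR the
 * movcond above substitutes 0 for the source when the amount exceeds 31
 * (e.g. "LSR r0, r1, r2" with r2 = 40 yields 0), while gen_sar clamps the
 * amount to 31, so an ASR by 40 just replicates the sign bit.
 */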
606
607 static void shifter_out_im(TCGv_i32 var, int shift)
608 {
609 if (shift == 0) {
610 tcg_gen_andi_i32(cpu_CF, var, 1);
611 } else {
612 tcg_gen_shri_i32(cpu_CF, var, shift);
613 if (shift != 31) {
614 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
615 }
616 }
617 }
618
619 /* Shift by immediate. Includes special handling for shift == 0. */
620 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
621 int shift, int flags)
622 {
623 switch (shiftop) {
624 case 0: /* LSL */
625 if (shift != 0) {
626 if (flags)
627 shifter_out_im(var, 32 - shift);
628 tcg_gen_shli_i32(var, var, shift);
629 }
630 break;
631 case 1: /* LSR */
632 if (shift == 0) {
633 if (flags) {
634 tcg_gen_shri_i32(cpu_CF, var, 31);
635 }
636 tcg_gen_movi_i32(var, 0);
637 } else {
638 if (flags)
639 shifter_out_im(var, shift - 1);
640 tcg_gen_shri_i32(var, var, shift);
641 }
642 break;
643 case 2: /* ASR */
644 if (shift == 0)
645 shift = 32;
646 if (flags)
647 shifter_out_im(var, shift - 1);
648 if (shift == 32)
649 shift = 31;
650 tcg_gen_sari_i32(var, var, shift);
651 break;
652 case 3: /* ROR/RRX */
653 if (shift != 0) {
654 if (flags)
655 shifter_out_im(var, shift - 1);
656 tcg_gen_rotri_i32(var, var, shift); break;
657 } else {
658 TCGv_i32 tmp = tcg_temp_new_i32();
659 tcg_gen_shli_i32(tmp, cpu_CF, 31);
660 if (flags)
661 shifter_out_im(var, 0);
662 tcg_gen_shri_i32(var, var, 1);
663 tcg_gen_or_i32(var, var, tmp);
664 tcg_temp_free_i32(tmp);
665 }
666 }
667 };
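/*
 * Illustrative note (annotation, not part of the original source): the
 * shift == 0 special cases follow the A32 immediate-shift encoding:
 * LSR #0 and ASR #0 encode shifts by 32 (hence the movi and sari-by-31
 * paths above), and ROR #0 encodes RRX, which rotates the carry flag into
 * bit 31 while shifting the value right by one.
 */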
668
669 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
670 TCGv_i32 shift, int flags)
671 {
672 if (flags) {
673 switch (shiftop) {
674 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
675 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
676 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
677 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
678 }
679 } else {
680 switch (shiftop) {
681 case 0:
682 gen_shl(var, var, shift);
683 break;
684 case 1:
685 gen_shr(var, var, shift);
686 break;
687 case 2:
688 gen_sar(var, var, shift);
689 break;
690 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
691 tcg_gen_rotr_i32(var, var, shift); break;
692 }
693 }
694 tcg_temp_free_i32(shift);
695 }
696
697 #define PAS_OP(pfx) \
698 switch (op2) { \
699 case 0: gen_pas_helper(glue(pfx,add16)); break; \
700 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
701 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
702 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
703 case 4: gen_pas_helper(glue(pfx,add8)); break; \
704 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
705 }
706 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
707 {
708 TCGv_ptr tmp;
709
710 switch (op1) {
711 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
712 case 1:
713 tmp = tcg_temp_new_ptr();
714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
715 PAS_OP(s)
716 tcg_temp_free_ptr(tmp);
717 break;
718 case 5:
719 tmp = tcg_temp_new_ptr();
720 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
721 PAS_OP(u)
722 tcg_temp_free_ptr(tmp);
723 break;
724 #undef gen_pas_helper
725 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
726 case 2:
727 PAS_OP(q);
728 break;
729 case 3:
730 PAS_OP(sh);
731 break;
732 case 6:
733 PAS_OP(uq);
734 break;
735 case 7:
736 PAS_OP(uh);
737 break;
738 #undef gen_pas_helper
739 }
740 }
741 #undef PAS_OP
742
743 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
744 #define PAS_OP(pfx) \
745 switch (op1) { \
746 case 0: gen_pas_helper(glue(pfx,add8)); break; \
747 case 1: gen_pas_helper(glue(pfx,add16)); break; \
748 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
749 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
750 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
751 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
752 }
753 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
754 {
755 TCGv_ptr tmp;
756
757 switch (op2) {
758 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
759 case 0:
760 tmp = tcg_temp_new_ptr();
761 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
762 PAS_OP(s)
763 tcg_temp_free_ptr(tmp);
764 break;
765 case 4:
766 tmp = tcg_temp_new_ptr();
767 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
768 PAS_OP(u)
769 tcg_temp_free_ptr(tmp);
770 break;
771 #undef gen_pas_helper
772 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
773 case 1:
774 PAS_OP(q);
775 break;
776 case 2:
777 PAS_OP(sh);
778 break;
779 case 5:
780 PAS_OP(uq);
781 break;
782 case 6:
783 PAS_OP(uh);
784 break;
785 #undef gen_pas_helper
786 }
787 }
788 #undef PAS_OP
789
790 /*
791 * Generate a conditional based on ARM condition code cc.
792 * This is common between ARM and Aarch64 targets.
793 */
794 void arm_test_cc(DisasCompare *cmp, int cc)
795 {
796 TCGv_i32 value;
797 TCGCond cond;
798 bool global = true;
799
800 switch (cc) {
801 case 0: /* eq: Z */
802 case 1: /* ne: !Z */
803 cond = TCG_COND_EQ;
804 value = cpu_ZF;
805 break;
806
807 case 2: /* cs: C */
808 case 3: /* cc: !C */
809 cond = TCG_COND_NE;
810 value = cpu_CF;
811 break;
812
813 case 4: /* mi: N */
814 case 5: /* pl: !N */
815 cond = TCG_COND_LT;
816 value = cpu_NF;
817 break;
818
819 case 6: /* vs: V */
820 case 7: /* vc: !V */
821 cond = TCG_COND_LT;
822 value = cpu_VF;
823 break;
824
825 case 8: /* hi: C && !Z */
826 case 9: /* ls: !C || Z -> !(C && !Z) */
827 cond = TCG_COND_NE;
828 value = tcg_temp_new_i32();
829 global = false;
830 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
831 ZF is non-zero for !Z; so AND the two subexpressions. */
832 tcg_gen_neg_i32(value, cpu_CF);
833 tcg_gen_and_i32(value, value, cpu_ZF);
834 break;
835
836 case 10: /* ge: N == V -> N ^ V == 0 */
837 case 11: /* lt: N != V -> N ^ V != 0 */
838 /* Since we're only interested in the sign bit, == 0 is >= 0. */
839 cond = TCG_COND_GE;
840 value = tcg_temp_new_i32();
841 global = false;
842 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
843 break;
844
845 case 12: /* gt: !Z && N == V */
846 case 13: /* le: Z || N != V */
847 cond = TCG_COND_NE;
848 value = tcg_temp_new_i32();
849 global = false;
850 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
851 * the sign bit then AND with ZF to yield the result. */
852 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
853 tcg_gen_sari_i32(value, value, 31);
854 tcg_gen_andc_i32(value, cpu_ZF, value);
855 break;
856
857 case 14: /* always */
858 case 15: /* always */
859 /* Use the ALWAYS condition, which will fold early.
860 * It doesn't matter what we use for the value. */
861 cond = TCG_COND_ALWAYS;
862 value = cpu_ZF;
863 goto no_invert;
864
865 default:
866 fprintf(stderr, "Bad condition code 0x%x\n", cc);
867 abort();
868 }
869
870 if (cc & 1) {
871 cond = tcg_invert_cond(cond);
872 }
873
874 no_invert:
875 cmp->cond = cond;
876 cmp->value = value;
877 cmp->value_global = global;
878 }
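/*
 * Illustrative note (annotation, not part of the original source): for
 * cc = 8 ("hi", C && !Z) the code above builds value = (-CF) & ZF, which is
 * non-zero exactly when CF == 1 and ZF != 0 (ZF holds a non-zero value when
 * Z is clear); cc = 9 ("ls") then simply inverts the TCG_COND_NE condition
 * on the same value.
 */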
879
880 void arm_free_cc(DisasCompare *cmp)
881 {
882 if (!cmp->value_global) {
883 tcg_temp_free_i32(cmp->value);
884 }
885 }
886
887 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
888 {
889 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
890 }
891
892 void arm_gen_test_cc(int cc, TCGLabel *label)
893 {
894 DisasCompare cmp;
895 arm_test_cc(&cmp, cc);
896 arm_jump_cc(&cmp, label);
897 arm_free_cc(&cmp);
898 }
899
900 static const uint8_t table_logic_cc[16] = {
901 1, /* and */
902 1, /* xor */
903 0, /* sub */
904 0, /* rsb */
905 0, /* add */
906 0, /* adc */
907 0, /* sbc */
908 0, /* rsc */
909 1, /* andl */
910 1, /* xorl */
911 0, /* cmp */
912 0, /* cmn */
913 1, /* orr */
914 1, /* mov */
915 1, /* bic */
916 1, /* mvn */
917 };
918
919 static inline void gen_set_condexec(DisasContext *s)
920 {
921 if (s->condexec_mask) {
922 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
923 TCGv_i32 tmp = tcg_temp_new_i32();
924 tcg_gen_movi_i32(tmp, val);
925 store_cpu_field(tmp, condexec_bits);
926 }
927 }
928
929 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
930 {
931 tcg_gen_movi_i32(cpu_R[15], val);
932 }
933
934 /* Set PC and Thumb state from an immediate address. */
935 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
936 {
937 TCGv_i32 tmp;
938
939 s->base.is_jmp = DISAS_JUMP;
940 if (s->thumb != (addr & 1)) {
941 tmp = tcg_temp_new_i32();
942 tcg_gen_movi_i32(tmp, addr & 1);
943 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
944 tcg_temp_free_i32(tmp);
945 }
946 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
947 }
948
949 /* Set PC and Thumb state from var. var is marked as dead. */
950 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
951 {
952 s->base.is_jmp = DISAS_JUMP;
953 tcg_gen_andi_i32(cpu_R[15], var, ~1);
954 tcg_gen_andi_i32(var, var, 1);
955 store_cpu_field(var, thumb);
956 }
957
958 /* Set PC and Thumb state from var. var is marked as dead.
959 * For M-profile CPUs, include logic to detect exception-return
960 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
961 * and BX reg, and no others, and happens only for code in Handler mode.
962 */
963 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
964 {
965 /* Generate the same code here as for a simple bx, but flag via
966 * s->base.is_jmp that we need to do the rest of the work later.
967 */
968 gen_bx(s, var);
969 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
970 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
971 s->base.is_jmp = DISAS_BX_EXCRET;
972 }
973 }
974
975 static inline void gen_bx_excret_final_code(DisasContext *s)
976 {
977 /* Generate the code to finish possible exception return and end the TB */
978 TCGLabel *excret_label = gen_new_label();
979 uint32_t min_magic;
980
981 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
982 /* Covers FNC_RETURN and EXC_RETURN magic */
983 min_magic = FNC_RETURN_MIN_MAGIC;
984 } else {
985 /* EXC_RETURN magic only */
986 min_magic = EXC_RETURN_MIN_MAGIC;
987 }
988
989 /* Is the new PC value in the magic range indicating exception return? */
990 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
991 /* No: end the TB as we would for a DISAS_JMP */
992 if (is_singlestepping(s)) {
993 gen_singlestep_exception(s);
994 } else {
995 tcg_gen_exit_tb(NULL, 0);
996 }
997 gen_set_label(excret_label);
998 /* Yes: this is an exception return.
999 * At this point in runtime env->regs[15] and env->thumb will hold
1000 * the exception-return magic number, which do_v7m_exception_exit()
1001 * will read. Nothing else will be able to see those values because
1002 * the cpu-exec main loop guarantees that we will always go straight
1003 * from raising the exception to the exception-handling code.
1004 *
1005 * gen_ss_advance(s) does nothing on M profile currently but
1006 * calling it is conceptually the right thing as we have executed
1007 * this instruction (compare SWI, HVC, SMC handling).
1008 */
1009 gen_ss_advance(s);
1010 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1011 }
1012
1013 static inline void gen_bxns(DisasContext *s, int rm)
1014 {
1015 TCGv_i32 var = load_reg(s, rm);
1016
1017 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1018 * we need to sync state before calling it, but:
1019 * - we don't need to do gen_set_pc_im() because the bxns helper will
1020 * always set the PC itself
1021 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1022 * unless it's outside an IT block or the last insn in an IT block,
1023 * so we know that condexec == 0 (already set at the top of the TB)
1024 * is correct in the non-UNPREDICTABLE cases, and we can choose
1025 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1026 */
1027 gen_helper_v7m_bxns(cpu_env, var);
1028 tcg_temp_free_i32(var);
1029 s->base.is_jmp = DISAS_EXIT;
1030 }
1031
1032 static inline void gen_blxns(DisasContext *s, int rm)
1033 {
1034 TCGv_i32 var = load_reg(s, rm);
1035
1036 /* We don't need to sync condexec state, for the same reason as bxns.
1037 * We do however need to set the PC, because the blxns helper reads it.
1038 * The blxns helper may throw an exception.
1039 */
1040 gen_set_pc_im(s, s->pc);
1041 gen_helper_v7m_blxns(cpu_env, var);
1042 tcg_temp_free_i32(var);
1043 s->base.is_jmp = DISAS_EXIT;
1044 }
1045
1046 /* Variant of store_reg which uses branch&exchange logic when storing
1047 to r15 in ARM architecture v7 and above. The source must be a temporary
1048 and will be marked as dead. */
1049 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1050 {
1051 if (reg == 15 && ENABLE_ARCH_7) {
1052 gen_bx(s, var);
1053 } else {
1054 store_reg(s, reg, var);
1055 }
1056 }
1057
1058 /* Variant of store_reg which uses branch&exchange logic when storing
1059 * to r15 in ARM architecture v5T and above. This is used for storing
1060 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1061 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1062 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1063 {
1064 if (reg == 15 && ENABLE_ARCH_5) {
1065 gen_bx_excret(s, var);
1066 } else {
1067 store_reg(s, reg, var);
1068 }
1069 }
1070
1071 #ifdef CONFIG_USER_ONLY
1072 #define IS_USER_ONLY 1
1073 #else
1074 #define IS_USER_ONLY 0
1075 #endif
1076
1077 /* Abstractions of "generate code to do a guest load/store for
1078 * AArch32", where a vaddr is always 32 bits (and is zero
1079 * extended if we're a 64 bit core) and data is also
1080 * 32 bits unless specifically doing a 64 bit access.
1081 * These functions work like tcg_gen_qemu_{ld,st}* except
1082 * that the address argument is TCGv_i32 rather than TCGv.
1083 */
1084
1085 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1086 {
1087 TCGv addr = tcg_temp_new();
1088 tcg_gen_extu_i32_tl(addr, a32);
1089
1090 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1091 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1092 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1093 }
1094 return addr;
1095 }
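/*
 * Illustrative note (annotation, not part of the original source): the XOR
 * above implements BE32 byte-lane swapping for system-mode SCTLR.B. A byte
 * access (MO_8) XORs the address with 3 and a halfword access (MO_16) with
 * 2, so e.g. a byte load from 0x1001 is performed at 0x1002; word and
 * larger accesses are left unchanged.
 */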
1096
1097 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1098 int index, TCGMemOp opc)
1099 {
1100 TCGv addr;
1101
1102 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1103 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1104 opc |= MO_ALIGN;
1105 }
1106
1107 addr = gen_aa32_addr(s, a32, opc);
1108 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1109 tcg_temp_free(addr);
1110 }
1111
1112 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1113 int index, TCGMemOp opc)
1114 {
1115 TCGv addr;
1116
1117 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1118 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1119 opc |= MO_ALIGN;
1120 }
1121
1122 addr = gen_aa32_addr(s, a32, opc);
1123 tcg_gen_qemu_st_i32(val, addr, index, opc);
1124 tcg_temp_free(addr);
1125 }
1126
1127 #define DO_GEN_LD(SUFF, OPC) \
1128 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1129 TCGv_i32 a32, int index) \
1130 { \
1131 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1132 } \
1133 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1134 TCGv_i32 val, \
1135 TCGv_i32 a32, int index, \
1136 ISSInfo issinfo) \
1137 { \
1138 gen_aa32_ld##SUFF(s, val, a32, index); \
1139 disas_set_da_iss(s, OPC, issinfo); \
1140 }
1141
1142 #define DO_GEN_ST(SUFF, OPC) \
1143 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1144 TCGv_i32 a32, int index) \
1145 { \
1146 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1147 } \
1148 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1149 TCGv_i32 val, \
1150 TCGv_i32 a32, int index, \
1151 ISSInfo issinfo) \
1152 { \
1153 gen_aa32_st##SUFF(s, val, a32, index); \
1154 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1155 }
1156
1157 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1158 {
1159 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1160 if (!IS_USER_ONLY && s->sctlr_b) {
1161 tcg_gen_rotri_i64(val, val, 32);
1162 }
1163 }
1164
1165 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1166 int index, TCGMemOp opc)
1167 {
1168 TCGv addr = gen_aa32_addr(s, a32, opc);
1169 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1170 gen_aa32_frob64(s, val);
1171 tcg_temp_free(addr);
1172 }
1173
1174 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1175 TCGv_i32 a32, int index)
1176 {
1177 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1178 }
1179
1180 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1181 int index, TCGMemOp opc)
1182 {
1183 TCGv addr = gen_aa32_addr(s, a32, opc);
1184
1185 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1186 if (!IS_USER_ONLY && s->sctlr_b) {
1187 TCGv_i64 tmp = tcg_temp_new_i64();
1188 tcg_gen_rotri_i64(tmp, val, 32);
1189 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1190 tcg_temp_free_i64(tmp);
1191 } else {
1192 tcg_gen_qemu_st_i64(val, addr, index, opc);
1193 }
1194 tcg_temp_free(addr);
1195 }
1196
1197 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1198 TCGv_i32 a32, int index)
1199 {
1200 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1201 }
1202
1203 DO_GEN_LD(8s, MO_SB)
1204 DO_GEN_LD(8u, MO_UB)
1205 DO_GEN_LD(16s, MO_SW)
1206 DO_GEN_LD(16u, MO_UW)
1207 DO_GEN_LD(32u, MO_UL)
1208 DO_GEN_ST(8, MO_UB)
1209 DO_GEN_ST(16, MO_UW)
1210 DO_GEN_ST(32, MO_UL)
1211
1212 static inline void gen_hvc(DisasContext *s, int imm16)
1213 {
1214 /* The pre HVC helper handles cases when HVC gets trapped
1215 * as an undefined insn by runtime configuration (ie before
1216 * the insn really executes).
1217 */
1218 gen_set_pc_im(s, s->pc - 4);
1219 gen_helper_pre_hvc(cpu_env);
1220 /* Otherwise we will treat this as a real exception which
1221 * happens after execution of the insn. (The distinction matters
1222 * for the PC value reported to the exception handler and also
1223 * for single stepping.)
1224 */
1225 s->svc_imm = imm16;
1226 gen_set_pc_im(s, s->pc);
1227 s->base.is_jmp = DISAS_HVC;
1228 }
1229
1230 static inline void gen_smc(DisasContext *s)
1231 {
1232 /* As with HVC, we may take an exception either before or after
1233 * the insn executes.
1234 */
1235 TCGv_i32 tmp;
1236
1237 gen_set_pc_im(s, s->pc - 4);
1238 tmp = tcg_const_i32(syn_aa32_smc());
1239 gen_helper_pre_smc(cpu_env, tmp);
1240 tcg_temp_free_i32(tmp);
1241 gen_set_pc_im(s, s->pc);
1242 s->base.is_jmp = DISAS_SMC;
1243 }
1244
1245 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1246 {
1247 gen_set_condexec(s);
1248 gen_set_pc_im(s, s->pc - offset);
1249 gen_exception_internal(excp);
1250 s->base.is_jmp = DISAS_NORETURN;
1251 }
1252
1253 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1254 int syn, uint32_t target_el)
1255 {
1256 gen_set_condexec(s);
1257 gen_set_pc_im(s, s->pc - offset);
1258 gen_exception(excp, syn, target_el);
1259 s->base.is_jmp = DISAS_NORETURN;
1260 }
1261
1262 static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1263 {
1264 TCGv_i32 tcg_syn;
1265
1266 gen_set_condexec(s);
1267 gen_set_pc_im(s, s->pc - offset);
1268 tcg_syn = tcg_const_i32(syn);
1269 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1270 tcg_temp_free_i32(tcg_syn);
1271 s->base.is_jmp = DISAS_NORETURN;
1272 }
1273
1274 /* Force a TB lookup after an instruction that changes the CPU state. */
1275 static inline void gen_lookup_tb(DisasContext *s)
1276 {
1277 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1278 s->base.is_jmp = DISAS_EXIT;
1279 }
1280
1281 static inline void gen_hlt(DisasContext *s, int imm)
1282 {
1283 /* HLT. This has two purposes.
1284 * Architecturally, it is an external halting debug instruction.
1285      * Since QEMU doesn't implement external debug, we treat this as
1286      * the architecture requires when halting debug is disabled: it will UNDEF.
1287 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1288 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1289 * must trigger semihosting even for ARMv7 and earlier, where
1290 * HLT was an undefined encoding.
1291 * In system mode, we don't allow userspace access to
1292 * semihosting, to provide some semblance of security
1293 * (and for consistency with our 32-bit semihosting).
1294 */
1295 if (semihosting_enabled() &&
1296 #ifndef CONFIG_USER_ONLY
1297 s->current_el != 0 &&
1298 #endif
1299 (imm == (s->thumb ? 0x3c : 0xf000))) {
1300 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1301 return;
1302 }
1303
1304 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1305 default_exception_el(s));
1306 }
1307
1308 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1309 TCGv_i32 var)
1310 {
1311 int val, rm, shift, shiftop;
1312 TCGv_i32 offset;
1313
1314 if (!(insn & (1 << 25))) {
1315 /* immediate */
1316 val = insn & 0xfff;
1317 if (!(insn & (1 << 23)))
1318 val = -val;
1319 if (val != 0)
1320 tcg_gen_addi_i32(var, var, val);
1321 } else {
1322 /* shift/register */
1323 rm = (insn) & 0xf;
1324 shift = (insn >> 7) & 0x1f;
1325 shiftop = (insn >> 5) & 3;
1326 offset = load_reg(s, rm);
1327 gen_arm_shift_im(offset, shiftop, shift, 0);
1328 if (!(insn & (1 << 23)))
1329 tcg_gen_sub_i32(var, var, offset);
1330 else
1331 tcg_gen_add_i32(var, var, offset);
1332 tcg_temp_free_i32(offset);
1333 }
1334 }
1335
1336 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1337 int extra, TCGv_i32 var)
1338 {
1339 int val, rm;
1340 TCGv_i32 offset;
1341
1342 if (insn & (1 << 22)) {
1343 /* immediate */
1344 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1345 if (!(insn & (1 << 23)))
1346 val = -val;
1347 val += extra;
1348 if (val != 0)
1349 tcg_gen_addi_i32(var, var, val);
1350 } else {
1351 /* register */
1352 if (extra)
1353 tcg_gen_addi_i32(var, var, extra);
1354 rm = (insn) & 0xf;
1355 offset = load_reg(s, rm);
1356 if (!(insn & (1 << 23)))
1357 tcg_gen_sub_i32(var, var, offset);
1358 else
1359 tcg_gen_add_i32(var, var, offset);
1360 tcg_temp_free_i32(offset);
1361 }
1362 }
1363
1364 static TCGv_ptr get_fpstatus_ptr(int neon)
1365 {
1366 TCGv_ptr statusptr = tcg_temp_new_ptr();
1367 int offset;
1368 if (neon) {
1369 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1370 } else {
1371 offset = offsetof(CPUARMState, vfp.fp_status);
1372 }
1373 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1374 return statusptr;
1375 }
1376
1377 #define VFP_GEN_ITOF(name) \
1378 static inline void gen_vfp_##name(int dp, int neon) \
1379 { \
1380 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1381 if (dp) { \
1382 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1383 } else { \
1384 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1385 } \
1386 tcg_temp_free_ptr(statusptr); \
1387 }
1388
1389 VFP_GEN_ITOF(uito)
1390 VFP_GEN_ITOF(sito)
1391 #undef VFP_GEN_ITOF
1392
1393 #define VFP_GEN_FTOI(name) \
1394 static inline void gen_vfp_##name(int dp, int neon) \
1395 { \
1396 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1397 if (dp) { \
1398 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1399 } else { \
1400 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1401 } \
1402 tcg_temp_free_ptr(statusptr); \
1403 }
1404
1405 VFP_GEN_FTOI(touiz)
1406 VFP_GEN_FTOI(tosiz)
1407 #undef VFP_GEN_FTOI
1408
1409 #define VFP_GEN_FIX(name, round) \
1410 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1411 { \
1412 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1413 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1414 if (dp) { \
1415 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1416 statusptr); \
1417 } else { \
1418 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1419 statusptr); \
1420 } \
1421 tcg_temp_free_i32(tmp_shift); \
1422 tcg_temp_free_ptr(statusptr); \
1423 }
1424 VFP_GEN_FIX(tosl, _round_to_zero)
1425 VFP_GEN_FIX(toul, _round_to_zero)
1426 VFP_GEN_FIX(slto, )
1427 VFP_GEN_FIX(ulto, )
1428 #undef VFP_GEN_FIX
1429
1430 static inline long vfp_reg_offset(bool dp, unsigned reg)
1431 {
1432 if (dp) {
1433 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1434 } else {
1435 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1436 if (reg & 1) {
1437 ofs += offsetof(CPU_DoubleU, l.upper);
1438 } else {
1439 ofs += offsetof(CPU_DoubleU, l.lower);
1440 }
1441 return ofs;
1442 }
1443 }
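/*
 * Illustrative note (annotation, not part of the original source): D
 * registers live at zregs[reg >> 1].d[reg & 1], and single-precision
 * registers map onto halves of those doubles. For example s3 is the upper
 * 32 bits of d1, so vfp_reg_offset(false, 3) points at zregs[0].d[1] plus
 * the l.upper word.
 */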
1444
1445 /* Return the offset of a 32-bit piece of a NEON register.
1446 zero is the least significant end of the register. */
1447 static inline long
1448 neon_reg_offset (int reg, int n)
1449 {
1450 int sreg;
1451 sreg = reg * 2 + n;
1452 return vfp_reg_offset(0, sreg);
1453 }
1454
1455 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1456 * where 0 is the least significant end of the register.
1457 */
1458 static inline long
1459 neon_element_offset(int reg, int element, TCGMemOp size)
1460 {
1461 int element_size = 1 << size;
1462 int ofs = element * element_size;
1463 #ifdef HOST_WORDS_BIGENDIAN
1464 /* Calculate the offset assuming fully little-endian,
1465 * then XOR to account for the order of the 8-byte units.
1466 */
1467 if (element_size < 8) {
1468 ofs ^= 8 - element_size;
1469 }
1470 #endif
1471 return neon_reg_offset(reg, 0) + ofs;
1472 }
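/*
 * Illustrative note (annotation, not part of the original source): on a
 * big-endian host the XOR re-orders elements within each 8-byte unit. For
 * example byte element 0 (little-endian offset 0) becomes offset 0 ^ 7 = 7,
 * and 16-bit element 1 (offset 2) becomes 2 ^ 6 = 4; 64-bit elements need
 * no adjustment.
 */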
1473
1474 static TCGv_i32 neon_load_reg(int reg, int pass)
1475 {
1476 TCGv_i32 tmp = tcg_temp_new_i32();
1477 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1478 return tmp;
1479 }
1480
1481 static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1482 {
1483 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1484
1485 switch (mop) {
1486 case MO_UB:
1487 tcg_gen_ld8u_i32(var, cpu_env, offset);
1488 break;
1489 case MO_UW:
1490 tcg_gen_ld16u_i32(var, cpu_env, offset);
1491 break;
1492 case MO_UL:
1493 tcg_gen_ld_i32(var, cpu_env, offset);
1494 break;
1495 default:
1496 g_assert_not_reached();
1497 }
1498 }
1499
1500 static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1501 {
1502 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1503
1504 switch (mop) {
1505 case MO_UB:
1506 tcg_gen_ld8u_i64(var, cpu_env, offset);
1507 break;
1508 case MO_UW:
1509 tcg_gen_ld16u_i64(var, cpu_env, offset);
1510 break;
1511 case MO_UL:
1512 tcg_gen_ld32u_i64(var, cpu_env, offset);
1513 break;
1514 case MO_Q:
1515 tcg_gen_ld_i64(var, cpu_env, offset);
1516 break;
1517 default:
1518 g_assert_not_reached();
1519 }
1520 }
1521
1522 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1523 {
1524 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1525 tcg_temp_free_i32(var);
1526 }
1527
1528 static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1529 {
1530 long offset = neon_element_offset(reg, ele, size);
1531
1532 switch (size) {
1533 case MO_8:
1534 tcg_gen_st8_i32(var, cpu_env, offset);
1535 break;
1536 case MO_16:
1537 tcg_gen_st16_i32(var, cpu_env, offset);
1538 break;
1539 case MO_32:
1540 tcg_gen_st_i32(var, cpu_env, offset);
1541 break;
1542 default:
1543 g_assert_not_reached();
1544 }
1545 }
1546
1547 static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1548 {
1549 long offset = neon_element_offset(reg, ele, size);
1550
1551 switch (size) {
1552 case MO_8:
1553 tcg_gen_st8_i64(var, cpu_env, offset);
1554 break;
1555 case MO_16:
1556 tcg_gen_st16_i64(var, cpu_env, offset);
1557 break;
1558 case MO_32:
1559 tcg_gen_st32_i64(var, cpu_env, offset);
1560 break;
1561 case MO_64:
1562 tcg_gen_st_i64(var, cpu_env, offset);
1563 break;
1564 default:
1565 g_assert_not_reached();
1566 }
1567 }
1568
1569 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1570 {
1571 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1572 }
1573
1574 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1575 {
1576 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1577 }
1578
1579 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1580 {
1581 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1582 }
1583
1584 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1585 {
1586 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1587 }
1588
1589 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1590 {
1591 TCGv_ptr ret = tcg_temp_new_ptr();
1592 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1593 return ret;
1594 }
1595
1596 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1597 #define tcg_gen_st_f32 tcg_gen_st_i32
1598
1599 #define ARM_CP_RW_BIT (1 << 20)
1600
1601 /* Include the VFP decoder */
1602 #include "translate-vfp.inc.c"
1603
1604 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1605 {
1606 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1607 }
1608
1609 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1610 {
1611 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1612 }
1613
1614 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1615 {
1616 TCGv_i32 var = tcg_temp_new_i32();
1617 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1618 return var;
1619 }
1620
1621 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1622 {
1623 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1624 tcg_temp_free_i32(var);
1625 }
1626
1627 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1628 {
1629 iwmmxt_store_reg(cpu_M0, rn);
1630 }
1631
1632 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1633 {
1634 iwmmxt_load_reg(cpu_M0, rn);
1635 }
1636
1637 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1638 {
1639 iwmmxt_load_reg(cpu_V1, rn);
1640 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1641 }
1642
1643 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1644 {
1645 iwmmxt_load_reg(cpu_V1, rn);
1646 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1647 }
1648
1649 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1650 {
1651 iwmmxt_load_reg(cpu_V1, rn);
1652 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1653 }
1654
1655 #define IWMMXT_OP(name) \
1656 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1657 { \
1658 iwmmxt_load_reg(cpu_V1, rn); \
1659 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1660 }
1661
1662 #define IWMMXT_OP_ENV(name) \
1663 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1664 { \
1665 iwmmxt_load_reg(cpu_V1, rn); \
1666 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1667 }
1668
1669 #define IWMMXT_OP_ENV_SIZE(name) \
1670 IWMMXT_OP_ENV(name##b) \
1671 IWMMXT_OP_ENV(name##w) \
1672 IWMMXT_OP_ENV(name##l)
1673
1674 #define IWMMXT_OP_ENV1(name) \
1675 static inline void gen_op_iwmmxt_##name##_M0(void) \
1676 { \
1677 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1678 }
1679
1680 IWMMXT_OP(maddsq)
1681 IWMMXT_OP(madduq)
1682 IWMMXT_OP(sadb)
1683 IWMMXT_OP(sadw)
1684 IWMMXT_OP(mulslw)
1685 IWMMXT_OP(mulshw)
1686 IWMMXT_OP(mululw)
1687 IWMMXT_OP(muluhw)
1688 IWMMXT_OP(macsw)
1689 IWMMXT_OP(macuw)
1690
1691 IWMMXT_OP_ENV_SIZE(unpackl)
1692 IWMMXT_OP_ENV_SIZE(unpackh)
1693
1694 IWMMXT_OP_ENV1(unpacklub)
1695 IWMMXT_OP_ENV1(unpackluw)
1696 IWMMXT_OP_ENV1(unpacklul)
1697 IWMMXT_OP_ENV1(unpackhub)
1698 IWMMXT_OP_ENV1(unpackhuw)
1699 IWMMXT_OP_ENV1(unpackhul)
1700 IWMMXT_OP_ENV1(unpacklsb)
1701 IWMMXT_OP_ENV1(unpacklsw)
1702 IWMMXT_OP_ENV1(unpacklsl)
1703 IWMMXT_OP_ENV1(unpackhsb)
1704 IWMMXT_OP_ENV1(unpackhsw)
1705 IWMMXT_OP_ENV1(unpackhsl)
1706
1707 IWMMXT_OP_ENV_SIZE(cmpeq)
1708 IWMMXT_OP_ENV_SIZE(cmpgtu)
1709 IWMMXT_OP_ENV_SIZE(cmpgts)
1710
1711 IWMMXT_OP_ENV_SIZE(mins)
1712 IWMMXT_OP_ENV_SIZE(minu)
1713 IWMMXT_OP_ENV_SIZE(maxs)
1714 IWMMXT_OP_ENV_SIZE(maxu)
1715
1716 IWMMXT_OP_ENV_SIZE(subn)
1717 IWMMXT_OP_ENV_SIZE(addn)
1718 IWMMXT_OP_ENV_SIZE(subu)
1719 IWMMXT_OP_ENV_SIZE(addu)
1720 IWMMXT_OP_ENV_SIZE(subs)
1721 IWMMXT_OP_ENV_SIZE(adds)
1722
1723 IWMMXT_OP_ENV(avgb0)
1724 IWMMXT_OP_ENV(avgb1)
1725 IWMMXT_OP_ENV(avgw0)
1726 IWMMXT_OP_ENV(avgw1)
1727
1728 IWMMXT_OP_ENV(packuw)
1729 IWMMXT_OP_ENV(packul)
1730 IWMMXT_OP_ENV(packuq)
1731 IWMMXT_OP_ENV(packsw)
1732 IWMMXT_OP_ENV(packsl)
1733 IWMMXT_OP_ENV(packsq)
1734
1735 static void gen_op_iwmmxt_set_mup(void)
1736 {
1737 TCGv_i32 tmp;
1738 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1739 tcg_gen_ori_i32(tmp, tmp, 2);
1740 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1741 }
1742
1743 static void gen_op_iwmmxt_set_cup(void)
1744 {
1745 TCGv_i32 tmp;
1746 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1747 tcg_gen_ori_i32(tmp, tmp, 1);
1748 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1749 }
1750
1751 static void gen_op_iwmmxt_setpsr_nz(void)
1752 {
1753 TCGv_i32 tmp = tcg_temp_new_i32();
1754 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1755 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1756 }
1757
1758 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1759 {
1760 iwmmxt_load_reg(cpu_V1, rn);
1761 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1762 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1763 }
1764
1765 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1766 TCGv_i32 dest)
1767 {
1768 int rd;
1769 uint32_t offset;
1770 TCGv_i32 tmp;
1771
1772 rd = (insn >> 16) & 0xf;
1773 tmp = load_reg(s, rd);
1774
1775 offset = (insn & 0xff) << ((insn >> 7) & 2);
1776 if (insn & (1 << 24)) {
1777 /* Pre indexed */
1778 if (insn & (1 << 23))
1779 tcg_gen_addi_i32(tmp, tmp, offset);
1780 else
1781 tcg_gen_addi_i32(tmp, tmp, -offset);
1782 tcg_gen_mov_i32(dest, tmp);
1783 if (insn & (1 << 21))
1784 store_reg(s, rd, tmp);
1785 else
1786 tcg_temp_free_i32(tmp);
1787 } else if (insn & (1 << 21)) {
1788 /* Post indexed */
1789 tcg_gen_mov_i32(dest, tmp);
1790 if (insn & (1 << 23))
1791 tcg_gen_addi_i32(tmp, tmp, offset);
1792 else
1793 tcg_gen_addi_i32(tmp, tmp, -offset);
1794 store_reg(s, rd, tmp);
1795 } else if (!(insn & (1 << 23)))
1796 return 1;
1797 return 0;
1798 }
1799
1800 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1801 {
1802 int rd = (insn >> 0) & 0xf;
1803 TCGv_i32 tmp;
1804
1805 if (insn & (1 << 8)) {
1806 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1807 return 1;
1808 } else {
1809 tmp = iwmmxt_load_creg(rd);
1810 }
1811 } else {
1812 tmp = tcg_temp_new_i32();
1813 iwmmxt_load_reg(cpu_V0, rd);
1814 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1815 }
1816 tcg_gen_andi_i32(tmp, tmp, mask);
1817 tcg_gen_mov_i32(dest, tmp);
1818 tcg_temp_free_i32(tmp);
1819 return 0;
1820 }
1821
1822 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1823 (ie. an undefined instruction). */
1824 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1825 {
1826 int rd, wrd;
1827 int rdhi, rdlo, rd0, rd1, i;
1828 TCGv_i32 addr;
1829 TCGv_i32 tmp, tmp2, tmp3;
1830
1831 if ((insn & 0x0e000e00) == 0x0c000000) {
1832 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1833 wrd = insn & 0xf;
1834 rdlo = (insn >> 12) & 0xf;
1835 rdhi = (insn >> 16) & 0xf;
1836 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1837 iwmmxt_load_reg(cpu_V0, wrd);
1838 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1839 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1840 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1841 } else { /* TMCRR */
1842 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1843 iwmmxt_store_reg(cpu_V0, wrd);
1844 gen_op_iwmmxt_set_mup();
1845 }
1846 return 0;
1847 }
1848
1849 wrd = (insn >> 12) & 0xf;
1850 addr = tcg_temp_new_i32();
1851 if (gen_iwmmxt_address(s, insn, addr)) {
1852 tcg_temp_free_i32(addr);
1853 return 1;
1854 }
1855 if (insn & ARM_CP_RW_BIT) {
1856 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1857 tmp = tcg_temp_new_i32();
1858 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1859 iwmmxt_store_creg(wrd, tmp);
1860 } else {
1861 i = 1;
1862 if (insn & (1 << 8)) {
1863 if (insn & (1 << 22)) { /* WLDRD */
1864 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1865 i = 0;
1866 } else { /* WLDRW wRd */
1867 tmp = tcg_temp_new_i32();
1868 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1869 }
1870 } else {
1871 tmp = tcg_temp_new_i32();
1872 if (insn & (1 << 22)) { /* WLDRH */
1873 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1874 } else { /* WLDRB */
1875 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1876 }
1877 }
1878 if (i) {
1879 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1880 tcg_temp_free_i32(tmp);
1881 }
1882 gen_op_iwmmxt_movq_wRn_M0(wrd);
1883 }
1884 } else {
1885 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1886 tmp = iwmmxt_load_creg(wrd);
1887 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1888 } else {
1889 gen_op_iwmmxt_movq_M0_wRn(wrd);
1890 tmp = tcg_temp_new_i32();
1891 if (insn & (1 << 8)) {
1892 if (insn & (1 << 22)) { /* WSTRD */
1893 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1894 } else { /* WSTRW wRd */
1895 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1896 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1897 }
1898 } else {
1899 if (insn & (1 << 22)) { /* WSTRH */
1900 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1901 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1902 } else { /* WSTRB */
1903 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1904 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1905 }
1906 }
1907 }
1908 tcg_temp_free_i32(tmp);
1909 }
1910 tcg_temp_free_i32(addr);
1911 return 0;
1912 }
1913
1914 if ((insn & 0x0f000000) != 0x0e000000)
1915 return 1;
1916
1917 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1918 case 0x000: /* WOR */
1919 wrd = (insn >> 12) & 0xf;
1920 rd0 = (insn >> 0) & 0xf;
1921 rd1 = (insn >> 16) & 0xf;
1922 gen_op_iwmmxt_movq_M0_wRn(rd0);
1923 gen_op_iwmmxt_orq_M0_wRn(rd1);
1924 gen_op_iwmmxt_setpsr_nz();
1925 gen_op_iwmmxt_movq_wRn_M0(wrd);
1926 gen_op_iwmmxt_set_mup();
1927 gen_op_iwmmxt_set_cup();
1928 break;
1929 case 0x011: /* TMCR */
1930 if (insn & 0xf)
1931 return 1;
1932 rd = (insn >> 12) & 0xf;
1933 wrd = (insn >> 16) & 0xf;
1934 switch (wrd) {
1935 case ARM_IWMMXT_wCID:
1936 case ARM_IWMMXT_wCASF:
1937 break;
1938 case ARM_IWMMXT_wCon:
1939 gen_op_iwmmxt_set_cup();
1940 /* Fall through. */
1941 case ARM_IWMMXT_wCSSF:
1942 tmp = iwmmxt_load_creg(wrd);
1943 tmp2 = load_reg(s, rd);
1944 tcg_gen_andc_i32(tmp, tmp, tmp2);
1945 tcg_temp_free_i32(tmp2);
1946 iwmmxt_store_creg(wrd, tmp);
1947 break;
1948 case ARM_IWMMXT_wCGR0:
1949 case ARM_IWMMXT_wCGR1:
1950 case ARM_IWMMXT_wCGR2:
1951 case ARM_IWMMXT_wCGR3:
1952 gen_op_iwmmxt_set_cup();
1953 tmp = load_reg(s, rd);
1954 iwmmxt_store_creg(wrd, tmp);
1955 break;
1956 default:
1957 return 1;
1958 }
1959 break;
1960 case 0x100: /* WXOR */
1961 wrd = (insn >> 12) & 0xf;
1962 rd0 = (insn >> 0) & 0xf;
1963 rd1 = (insn >> 16) & 0xf;
1964 gen_op_iwmmxt_movq_M0_wRn(rd0);
1965 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1966 gen_op_iwmmxt_setpsr_nz();
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 gen_op_iwmmxt_set_cup();
1970 break;
1971 case 0x111: /* TMRC */
1972 if (insn & 0xf)
1973 return 1;
1974 rd = (insn >> 12) & 0xf;
1975 wrd = (insn >> 16) & 0xf;
1976 tmp = iwmmxt_load_creg(wrd);
1977 store_reg(s, rd, tmp);
1978 break;
1979 case 0x300: /* WANDN */
1980 wrd = (insn >> 12) & 0xf;
1981 rd0 = (insn >> 0) & 0xf;
1982 rd1 = (insn >> 16) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0);
1984 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1985 gen_op_iwmmxt_andq_M0_wRn(rd1);
1986 gen_op_iwmmxt_setpsr_nz();
1987 gen_op_iwmmxt_movq_wRn_M0(wrd);
1988 gen_op_iwmmxt_set_mup();
1989 gen_op_iwmmxt_set_cup();
1990 break;
1991 case 0x200: /* WAND */
1992 wrd = (insn >> 12) & 0xf;
1993 rd0 = (insn >> 0) & 0xf;
1994 rd1 = (insn >> 16) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0);
1996 gen_op_iwmmxt_andq_M0_wRn(rd1);
1997 gen_op_iwmmxt_setpsr_nz();
1998 gen_op_iwmmxt_movq_wRn_M0(wrd);
1999 gen_op_iwmmxt_set_mup();
2000 gen_op_iwmmxt_set_cup();
2001 break;
2002 case 0x810: case 0xa10: /* WMADD */
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 0) & 0xf;
2005 rd1 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 if (insn & (1 << 21))
2008 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2009 else
2010 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2011 gen_op_iwmmxt_movq_wRn_M0(wrd);
2012 gen_op_iwmmxt_set_mup();
2013 break;
2014 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 rd1 = (insn >> 0) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
2019 switch ((insn >> 22) & 3) {
2020 case 0:
2021 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2022 break;
2023 case 1:
2024 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2025 break;
2026 case 2:
2027 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2028 break;
2029 case 3:
2030 return 1;
2031 }
2032 gen_op_iwmmxt_movq_wRn_M0(wrd);
2033 gen_op_iwmmxt_set_mup();
2034 gen_op_iwmmxt_set_cup();
2035 break;
2036 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2037 wrd = (insn >> 12) & 0xf;
2038 rd0 = (insn >> 16) & 0xf;
2039 rd1 = (insn >> 0) & 0xf;
2040 gen_op_iwmmxt_movq_M0_wRn(rd0);
2041 switch ((insn >> 22) & 3) {
2042 case 0:
2043 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2044 break;
2045 case 1:
2046 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2047 break;
2048 case 2:
2049 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2050 break;
2051 case 3:
2052 return 1;
2053 }
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 gen_op_iwmmxt_set_cup();
2057 break;
2058 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 rd1 = (insn >> 0) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 if (insn & (1 << 22))
2064 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2065 else
2066 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2067 if (!(insn & (1 << 20)))
2068 gen_op_iwmmxt_addl_M0_wRn(wrd);
2069 gen_op_iwmmxt_movq_wRn_M0(wrd);
2070 gen_op_iwmmxt_set_mup();
2071 break;
2072 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2073 wrd = (insn >> 12) & 0xf;
2074 rd0 = (insn >> 16) & 0xf;
2075 rd1 = (insn >> 0) & 0xf;
2076 gen_op_iwmmxt_movq_M0_wRn(rd0);
2077 if (insn & (1 << 21)) {
2078 if (insn & (1 << 20))
2079 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2080 else
2081 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2082 } else {
2083 if (insn & (1 << 20))
2084 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2085 else
2086 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2087 }
2088 gen_op_iwmmxt_movq_wRn_M0(wrd);
2089 gen_op_iwmmxt_set_mup();
2090 break;
2091 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 rd1 = (insn >> 0) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0);
2096 if (insn & (1 << 21))
2097 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2098 else
2099 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2100 if (!(insn & (1 << 20))) {
2101 iwmmxt_load_reg(cpu_V1, wrd);
2102 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2103 }
2104 gen_op_iwmmxt_movq_wRn_M0(wrd);
2105 gen_op_iwmmxt_set_mup();
2106 break;
2107 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 rd1 = (insn >> 0) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0);
2112 switch ((insn >> 22) & 3) {
2113 case 0:
2114 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2115 break;
2116 case 1:
2117 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2118 break;
2119 case 2:
2120 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2121 break;
2122 case 3:
2123 return 1;
2124 }
2125 gen_op_iwmmxt_movq_wRn_M0(wrd);
2126 gen_op_iwmmxt_set_mup();
2127 gen_op_iwmmxt_set_cup();
2128 break;
2129 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2130 wrd = (insn >> 12) & 0xf;
2131 rd0 = (insn >> 16) & 0xf;
2132 rd1 = (insn >> 0) & 0xf;
2133 gen_op_iwmmxt_movq_M0_wRn(rd0);
2134 if (insn & (1 << 22)) {
2135 if (insn & (1 << 20))
2136 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2137 else
2138 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2139 } else {
2140 if (insn & (1 << 20))
2141 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2142 else
2143 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 gen_op_iwmmxt_set_cup();
2148 break;
2149 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
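        /* WALIGNR: the byte rotation amount is the low 3 bits of
           wCGR0..wCGR3, selected by insn[21:20]. */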
2154 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2155 tcg_gen_andi_i32(tmp, tmp, 7);
2156 iwmmxt_load_reg(cpu_V1, rd1);
2157 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2158 tcg_temp_free_i32(tmp);
2159 gen_op_iwmmxt_movq_wRn_M0(wrd);
2160 gen_op_iwmmxt_set_mup();
2161 break;
2162 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2163 if (((insn >> 6) & 3) == 3)
2164 return 1;
2165 rd = (insn >> 12) & 0xf;
2166 wrd = (insn >> 16) & 0xf;
2167 tmp = load_reg(s, rd);
2168 gen_op_iwmmxt_movq_M0_wRn(wrd);
2169 switch ((insn >> 6) & 3) {
2170 case 0:
2171 tmp2 = tcg_const_i32(0xff);
2172 tmp3 = tcg_const_i32((insn & 7) << 3);
2173 break;
2174 case 1:
2175 tmp2 = tcg_const_i32(0xffff);
2176 tmp3 = tcg_const_i32((insn & 3) << 4);
2177 break;
2178 case 2:
2179 tmp2 = tcg_const_i32(0xffffffff);
2180 tmp3 = tcg_const_i32((insn & 1) << 5);
2181 break;
2182 default:
2183 tmp2 = NULL;
2184 tmp3 = NULL;
2185 }
2186 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2187 tcg_temp_free_i32(tmp3);
2188 tcg_temp_free_i32(tmp2);
2189 tcg_temp_free_i32(tmp);
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 break;
2193 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2194 rd = (insn >> 12) & 0xf;
2195 wrd = (insn >> 16) & 0xf;
2196 if (rd == 15 || ((insn >> 22) & 3) == 3)
2197 return 1;
2198 gen_op_iwmmxt_movq_M0_wRn(wrd);
2199 tmp = tcg_temp_new_i32();
2200 switch ((insn >> 22) & 3) {
2201 case 0:
2202 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2203 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2204 if (insn & 8) {
2205 tcg_gen_ext8s_i32(tmp, tmp);
2206 } else {
2207 tcg_gen_andi_i32(tmp, tmp, 0xff);
2208 }
2209 break;
2210 case 1:
2211 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2212 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2213 if (insn & 8) {
2214 tcg_gen_ext16s_i32(tmp, tmp);
2215 } else {
2216 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2217 }
2218 break;
2219 case 2:
2220 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2221 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2222 break;
2223 }
2224 store_reg(s, rd, tmp);
2225 break;
2226 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2227 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2228 return 1;
2229 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2230 switch ((insn >> 22) & 3) {
2231 case 0:
2232 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2233 break;
2234 case 1:
2235 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2236 break;
2237 case 2:
2238 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2239 break;
2240 }
2241 tcg_gen_shli_i32(tmp, tmp, 28);
2242 gen_set_nzcv(tmp);
2243 tcg_temp_free_i32(tmp);
2244 break;
2245 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2246 if (((insn >> 6) & 3) == 3)
2247 return 1;
2248 rd = (insn >> 12) & 0xf;
2249 wrd = (insn >> 16) & 0xf;
2250 tmp = load_reg(s, rd);
2251 switch ((insn >> 6) & 3) {
2252 case 0:
2253 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2254 break;
2255 case 1:
2256 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2257 break;
2258 case 2:
2259 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2260 break;
2261 }
2262 tcg_temp_free_i32(tmp);
2263 gen_op_iwmmxt_movq_wRn_M0(wrd);
2264 gen_op_iwmmxt_set_mup();
2265 break;
2266 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2267 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2268 return 1;
2269 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2270 tmp2 = tcg_temp_new_i32();
2271 tcg_gen_mov_i32(tmp2, tmp);
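        /* TANDC: AND together the per-element flag fields held in wCASF
           (4 bits per byte element, 8 per halfword, 16 per word) and set
           NZCV from the combined result. */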
2272 switch ((insn >> 22) & 3) {
2273 case 0:
2274 for (i = 0; i < 7; i ++) {
2275 tcg_gen_shli_i32(tmp2, tmp2, 4);
2276 tcg_gen_and_i32(tmp, tmp, tmp2);
2277 }
2278 break;
2279 case 1:
2280 for (i = 0; i < 3; i ++) {
2281 tcg_gen_shli_i32(tmp2, tmp2, 8);
2282 tcg_gen_and_i32(tmp, tmp, tmp2);
2283 }
2284 break;
2285 case 2:
2286 tcg_gen_shli_i32(tmp2, tmp2, 16);
2287 tcg_gen_and_i32(tmp, tmp, tmp2);
2288 break;
2289 }
2290 gen_set_nzcv(tmp);
2291 tcg_temp_free_i32(tmp2);
2292 tcg_temp_free_i32(tmp);
2293 break;
2294 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2295 wrd = (insn >> 12) & 0xf;
2296 rd0 = (insn >> 16) & 0xf;
2297 gen_op_iwmmxt_movq_M0_wRn(rd0);
2298 switch ((insn >> 22) & 3) {
2299 case 0:
2300 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2301 break;
2302 case 1:
2303 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2304 break;
2305 case 2:
2306 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2307 break;
2308 case 3:
2309 return 1;
2310 }
2311 gen_op_iwmmxt_movq_wRn_M0(wrd);
2312 gen_op_iwmmxt_set_mup();
2313 break;
2314 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2315 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2316 return 1;
2317 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2318 tmp2 = tcg_temp_new_i32();
2319 tcg_gen_mov_i32(tmp2, tmp);
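        /* TORC: OR together the per-element flag fields held in wCASF and
           set NZCV from the combined result, mirroring TANDC above. */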
2320 switch ((insn >> 22) & 3) {
2321 case 0:
2322 for (i = 0; i < 7; i ++) {
2323 tcg_gen_shli_i32(tmp2, tmp2, 4);
2324 tcg_gen_or_i32(tmp, tmp, tmp2);
2325 }
2326 break;
2327 case 1:
2328 for (i = 0; i < 3; i ++) {
2329 tcg_gen_shli_i32(tmp2, tmp2, 8);
2330 tcg_gen_or_i32(tmp, tmp, tmp2);
2331 }
2332 break;
2333 case 2:
2334 tcg_gen_shli_i32(tmp2, tmp2, 16);
2335 tcg_gen_or_i32(tmp, tmp, tmp2);
2336 break;
2337 }
2338 gen_set_nzcv(tmp);
2339 tcg_temp_free_i32(tmp2);
2340 tcg_temp_free_i32(tmp);
2341 break;
2342 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2343 rd = (insn >> 12) & 0xf;
2344 rd0 = (insn >> 16) & 0xf;
2345 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2346 return 1;
2347 gen_op_iwmmxt_movq_M0_wRn(rd0);
2348 tmp = tcg_temp_new_i32();
2349 switch ((insn >> 22) & 3) {
2350 case 0:
2351 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2352 break;
2353 case 1:
2354 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2355 break;
2356 case 2:
2357 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2358 break;
2359 }
2360 store_reg(s, rd, tmp);
2361 break;
2362 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2363 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2364 wrd = (insn >> 12) & 0xf;
2365 rd0 = (insn >> 16) & 0xf;
2366 rd1 = (insn >> 0) & 0xf;
2367 gen_op_iwmmxt_movq_M0_wRn(rd0);
2368 switch ((insn >> 22) & 3) {
2369 case 0:
2370 if (insn & (1 << 21))
2371 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2372 else
2373 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2374 break;
2375 case 1:
2376 if (insn & (1 << 21))
2377 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2378 else
2379 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2380 break;
2381 case 2:
2382 if (insn & (1 << 21))
2383 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2384 else
2385 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2386 break;
2387 case 3:
2388 return 1;
2389 }
2390 gen_op_iwmmxt_movq_wRn_M0(wrd);
2391 gen_op_iwmmxt_set_mup();
2392 gen_op_iwmmxt_set_cup();
2393 break;
2394 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2395 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2396 wrd = (insn >> 12) & 0xf;
2397 rd0 = (insn >> 16) & 0xf;
2398 gen_op_iwmmxt_movq_M0_wRn(rd0);
2399 switch ((insn >> 22) & 3) {
2400 case 0:
2401 if (insn & (1 << 21))
2402 gen_op_iwmmxt_unpacklsb_M0();
2403 else
2404 gen_op_iwmmxt_unpacklub_M0();
2405 break;
2406 case 1:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_unpacklsw_M0();
2409 else
2410 gen_op_iwmmxt_unpackluw_M0();
2411 break;
2412 case 2:
2413 if (insn & (1 << 21))
2414 gen_op_iwmmxt_unpacklsl_M0();
2415 else
2416 gen_op_iwmmxt_unpacklul_M0();
2417 break;
2418 case 3:
2419 return 1;
2420 }
2421 gen_op_iwmmxt_movq_wRn_M0(wrd);
2422 gen_op_iwmmxt_set_mup();
2423 gen_op_iwmmxt_set_cup();
2424 break;
2425 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2426 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2427 wrd = (insn >> 12) & 0xf;
2428 rd0 = (insn >> 16) & 0xf;
2429 gen_op_iwmmxt_movq_M0_wRn(rd0);
2430 switch ((insn >> 22) & 3) {
2431 case 0:
2432 if (insn & (1 << 21))
2433 gen_op_iwmmxt_unpackhsb_M0();
2434 else
2435 gen_op_iwmmxt_unpackhub_M0();
2436 break;
2437 case 1:
2438 if (insn & (1 << 21))
2439 gen_op_iwmmxt_unpackhsw_M0();
2440 else
2441 gen_op_iwmmxt_unpackhuw_M0();
2442 break;
2443 case 2:
2444 if (insn & (1 << 21))
2445 gen_op_iwmmxt_unpackhsl_M0();
2446 else
2447 gen_op_iwmmxt_unpackhul_M0();
2448 break;
2449 case 3:
2450 return 1;
2451 }
2452 gen_op_iwmmxt_movq_wRn_M0(wrd);
2453 gen_op_iwmmxt_set_mup();
2454 gen_op_iwmmxt_set_cup();
2455 break;
2456 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2457 case 0x214: case 0x614: case 0xa14: case 0xe14:
2458 if (((insn >> 22) & 3) == 0)
2459 return 1;
2460 wrd = (insn >> 12) & 0xf;
2461 rd0 = (insn >> 16) & 0xf;
2462 gen_op_iwmmxt_movq_M0_wRn(rd0);
2463 tmp = tcg_temp_new_i32();
2464 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2465 tcg_temp_free_i32(tmp);
2466 return 1;
2467 }
2468 switch ((insn >> 22) & 3) {
2469 case 1:
2470 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2471 break;
2472 case 2:
2473 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2474 break;
2475 case 3:
2476 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2477 break;
2478 }
2479 tcg_temp_free_i32(tmp);
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2483 break;
2484 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2485 case 0x014: case 0x414: case 0x814: case 0xc14:
2486 if (((insn >> 22) & 3) == 0)
2487 return 1;
2488 wrd = (insn >> 12) & 0xf;
2489 rd0 = (insn >> 16) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
2491 tmp = tcg_temp_new_i32();
2492 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2493 tcg_temp_free_i32(tmp);
2494 return 1;
2495 }
2496 switch ((insn >> 22) & 3) {
2497 case 1:
2498 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2499 break;
2500 case 2:
2501 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2502 break;
2503 case 3:
2504 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2505 break;
2506 }
2507 tcg_temp_free_i32(tmp);
2508 gen_op_iwmmxt_movq_wRn_M0(wrd);
2509 gen_op_iwmmxt_set_mup();
2510 gen_op_iwmmxt_set_cup();
2511 break;
2512 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2513 case 0x114: case 0x514: case 0x914: case 0xd14:
2514 if (((insn >> 22) & 3) == 0)
2515 return 1;
2516 wrd = (insn >> 12) & 0xf;
2517 rd0 = (insn >> 16) & 0xf;
2518 gen_op_iwmmxt_movq_M0_wRn(rd0);
2519 tmp = tcg_temp_new_i32();
2520 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2521 tcg_temp_free_i32(tmp);
2522 return 1;
2523 }
2524 switch ((insn >> 22) & 3) {
2525 case 1:
2526 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2527 break;
2528 case 2:
2529 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2530 break;
2531 case 3:
2532 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2533 break;
2534 }
2535 tcg_temp_free_i32(tmp);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2541 case 0x314: case 0x714: case 0xb14: case 0xf14:
2542 if (((insn >> 22) & 3) == 0)
2543 return 1;
2544 wrd = (insn >> 12) & 0xf;
2545 rd0 = (insn >> 16) & 0xf;
2546 gen_op_iwmmxt_movq_M0_wRn(rd0);
2547 tmp = tcg_temp_new_i32();
2548 switch ((insn >> 22) & 3) {
2549 case 1:
2550 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2551 tcg_temp_free_i32(tmp);
2552 return 1;
2553 }
2554 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2555 break;
2556 case 2:
2557 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2558 tcg_temp_free_i32(tmp);
2559 return 1;
2560 }
2561 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2562 break;
2563 case 3:
2564 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2565 tcg_temp_free_i32(tmp);
2566 return 1;
2567 }
2568 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2569 break;
2570 }
2571 tcg_temp_free_i32(tmp);
2572 gen_op_iwmmxt_movq_wRn_M0(wrd);
2573 gen_op_iwmmxt_set_mup();
2574 gen_op_iwmmxt_set_cup();
2575 break;
2576 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2577 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2578 wrd = (insn >> 12) & 0xf;
2579 rd0 = (insn >> 16) & 0xf;
2580 rd1 = (insn >> 0) & 0xf;
2581 gen_op_iwmmxt_movq_M0_wRn(rd0);
2582 switch ((insn >> 22) & 3) {
2583 case 0:
2584 if (insn & (1 << 21))
2585 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2586 else
2587 gen_op_iwmmxt_minub_M0_wRn(rd1);
2588 break;
2589 case 1:
2590 if (insn & (1 << 21))
2591 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2592 else
2593 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2594 break;
2595 case 2:
2596 if (insn & (1 << 21))
2597 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2598 else
2599 gen_op_iwmmxt_minul_M0_wRn(rd1);
2600 break;
2601 case 3:
2602 return 1;
2603 }
2604 gen_op_iwmmxt_movq_wRn_M0(wrd);
2605 gen_op_iwmmxt_set_mup();
2606 break;
2607 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2608 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2609 wrd = (insn >> 12) & 0xf;
2610 rd0 = (insn >> 16) & 0xf;
2611 rd1 = (insn >> 0) & 0xf;
2612 gen_op_iwmmxt_movq_M0_wRn(rd0);
2613 switch ((insn >> 22) & 3) {
2614 case 0:
2615 if (insn & (1 << 21))
2616 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2617 else
2618 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2619 break;
2620 case 1:
2621 if (insn & (1 << 21))
2622 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2623 else
2624 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2625 break;
2626 case 2:
2627 if (insn & (1 << 21))
2628 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2629 else
2630 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2631 break;
2632 case 3:
2633 return 1;
2634 }
2635 gen_op_iwmmxt_movq_wRn_M0(wrd);
2636 gen_op_iwmmxt_set_mup();
2637 break;
2638 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2639 case 0x402: case 0x502: case 0x602: case 0x702:
2640 wrd = (insn >> 12) & 0xf;
2641 rd0 = (insn >> 16) & 0xf;
2642 rd1 = (insn >> 0) & 0xf;
2643 gen_op_iwmmxt_movq_M0_wRn(rd0);
2644 tmp = tcg_const_i32((insn >> 20) & 3);
2645 iwmmxt_load_reg(cpu_V1, rd1);
2646 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2647 tcg_temp_free_i32(tmp);
2648 gen_op_iwmmxt_movq_wRn_M0(wrd);
2649 gen_op_iwmmxt_set_mup();
2650 break;
2651 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2652 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2653 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2654 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2655 wrd = (insn >> 12) & 0xf;
2656 rd0 = (insn >> 16) & 0xf;
2657 rd1 = (insn >> 0) & 0xf;
2658 gen_op_iwmmxt_movq_M0_wRn(rd0);
2659 switch ((insn >> 20) & 0xf) {
2660 case 0x0:
2661 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2662 break;
2663 case 0x1:
2664 gen_op_iwmmxt_subub_M0_wRn(rd1);
2665 break;
2666 case 0x3:
2667 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2668 break;
2669 case 0x4:
2670 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2671 break;
2672 case 0x5:
2673 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2674 break;
2675 case 0x7:
2676 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2677 break;
2678 case 0x8:
2679 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2680 break;
2681 case 0x9:
2682 gen_op_iwmmxt_subul_M0_wRn(rd1);
2683 break;
2684 case 0xb:
2685 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2686 break;
2687 default:
2688 return 1;
2689 }
2690 gen_op_iwmmxt_movq_wRn_M0(wrd);
2691 gen_op_iwmmxt_set_mup();
2692 gen_op_iwmmxt_set_cup();
2693 break;
2694 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2695 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2696 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2697 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2698 wrd = (insn >> 12) & 0xf;
2699 rd0 = (insn >> 16) & 0xf;
2700 gen_op_iwmmxt_movq_M0_wRn(rd0);
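        /* The 8-bit shuffle immediate is split between insn[23:20]
           (upper nibble) and insn[3:0] (lower nibble). */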
2701 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2702 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2703 tcg_temp_free_i32(tmp);
2704 gen_op_iwmmxt_movq_wRn_M0(wrd);
2705 gen_op_iwmmxt_set_mup();
2706 gen_op_iwmmxt_set_cup();
2707 break;
2708 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2709 case 0x418: case 0x518: case 0x618: case 0x718:
2710 case 0x818: case 0x918: case 0xa18: case 0xb18:
2711 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2712 wrd = (insn >> 12) & 0xf;
2713 rd0 = (insn >> 16) & 0xf;
2714 rd1 = (insn >> 0) & 0xf;
2715 gen_op_iwmmxt_movq_M0_wRn(rd0);
2716 switch ((insn >> 20) & 0xf) {
2717 case 0x0:
2718 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2719 break;
2720 case 0x1:
2721 gen_op_iwmmxt_addub_M0_wRn(rd1);
2722 break;
2723 case 0x3:
2724 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2725 break;
2726 case 0x4:
2727 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2728 break;
2729 case 0x5:
2730 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2731 break;
2732 case 0x7:
2733 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2734 break;
2735 case 0x8:
2736 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2737 break;
2738 case 0x9:
2739 gen_op_iwmmxt_addul_M0_wRn(rd1);
2740 break;
2741 case 0xb:
2742 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2743 break;
2744 default:
2745 return 1;
2746 }
2747 gen_op_iwmmxt_movq_wRn_M0(wrd);
2748 gen_op_iwmmxt_set_mup();
2749 gen_op_iwmmxt_set_cup();
2750 break;
2751 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2752 case 0x408: case 0x508: case 0x608: case 0x708:
2753 case 0x808: case 0x908: case 0xa08: case 0xb08:
2754 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2755 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2756 return 1;
2757 wrd = (insn >> 12) & 0xf;
2758 rd0 = (insn >> 16) & 0xf;
2759 rd1 = (insn >> 0) & 0xf;
2760 gen_op_iwmmxt_movq_M0_wRn(rd0);
2761 switch ((insn >> 22) & 3) {
2762 case 1:
2763 if (insn & (1 << 21))
2764 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2765 else
2766 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2767 break;
2768 case 2:
2769 if (insn & (1 << 21))
2770 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2771 else
2772 gen_op_iwmmxt_packul_M0_wRn(rd1);
2773 break;
2774 case 3:
2775 if (insn & (1 << 21))
2776 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2777 else
2778 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2779 break;
2780 }
2781 gen_op_iwmmxt_movq_wRn_M0(wrd);
2782 gen_op_iwmmxt_set_mup();
2783 gen_op_iwmmxt_set_cup();
2784 break;
2785 case 0x201: case 0x203: case 0x205: case 0x207:
2786 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2787 case 0x211: case 0x213: case 0x215: case 0x217:
2788 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2789 wrd = (insn >> 5) & 0xf;
2790 rd0 = (insn >> 12) & 0xf;
2791 rd1 = (insn >> 0) & 0xf;
2792 if (rd0 == 0xf || rd1 == 0xf)
2793 return 1;
2794 gen_op_iwmmxt_movq_M0_wRn(wrd);
2795 tmp = load_reg(s, rd0);
2796 tmp2 = load_reg(s, rd1);
2797 switch ((insn >> 16) & 0xf) {
2798 case 0x0: /* TMIA */
2799 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2800 break;
2801 case 0x8: /* TMIAPH */
2802 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2803 break;
2804 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2805 if (insn & (1 << 16))
2806 tcg_gen_shri_i32(tmp, tmp, 16);
2807 if (insn & (1 << 17))
2808 tcg_gen_shri_i32(tmp2, tmp2, 16);
2809 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2810 break;
2811 default:
2812 tcg_temp_free_i32(tmp2);
2813 tcg_temp_free_i32(tmp);
2814 return 1;
2815 }
2816 tcg_temp_free_i32(tmp2);
2817 tcg_temp_free_i32(tmp);
2818 gen_op_iwmmxt_movq_wRn_M0(wrd);
2819 gen_op_iwmmxt_set_mup();
2820 break;
2821 default:
2822 return 1;
2823 }
2824
2825 return 0;
2826 }
2827
2828 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2829    (i.e. an undefined instruction). */
2830 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2831 {
2832 int acc, rd0, rd1, rdhi, rdlo;
2833 TCGv_i32 tmp, tmp2;
2834
2835 if ((insn & 0x0ff00f10) == 0x0e200010) {
2836 /* Multiply with Internal Accumulate Format */
2837 rd0 = (insn >> 12) & 0xf;
2838 rd1 = insn & 0xf;
2839 acc = (insn >> 5) & 7;
2840
2841 if (acc != 0)
2842 return 1;
2843
2844 tmp = load_reg(s, rd0);
2845 tmp2 = load_reg(s, rd1);
2846 switch ((insn >> 16) & 0xf) {
2847 case 0x0: /* MIA */
2848 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2849 break;
2850 case 0x8: /* MIAPH */
2851 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2852 break;
2853 case 0xc: /* MIABB */
2854 case 0xd: /* MIABT */
2855 case 0xe: /* MIATB */
2856 case 0xf: /* MIATT */
2857 if (insn & (1 << 16))
2858 tcg_gen_shri_i32(tmp, tmp, 16);
2859 if (insn & (1 << 17))
2860 tcg_gen_shri_i32(tmp2, tmp2, 16);
2861 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2862 break;
2863 default:
2864 return 1;
2865 }
2866 tcg_temp_free_i32(tmp2);
2867 tcg_temp_free_i32(tmp);
2868
2869 gen_op_iwmmxt_movq_wRn_M0(acc);
2870 return 0;
2871 }
2872
2873 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2874 /* Internal Accumulator Access Format */
2875 rdhi = (insn >> 16) & 0xf;
2876 rdlo = (insn >> 12) & 0xf;
2877 acc = insn & 7;
2878
2879 if (acc != 0)
2880 return 1;
2881
2882 if (insn & ARM_CP_RW_BIT) { /* MRA */
2883 iwmmxt_load_reg(cpu_V0, acc);
2884 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2885 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2886 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
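            /* The accumulator is only 40 bits wide; keep just acc[39:32] in rdhi. */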
2887 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2888 } else { /* MAR */
2889 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2890 iwmmxt_store_reg(cpu_V0, acc);
2891 }
2892 return 0;
2893 }
2894
2895 return 1;
2896 }
2897
2898 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2899 #define VFP_SREG(insn, bigbit, smallbit) \
2900 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2901 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2902 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2903 reg = (((insn) >> (bigbit)) & 0x0f) \
2904 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2905 } else { \
2906 if (insn & (1 << (smallbit))) \
2907 return 1; \
2908 reg = ((insn) >> (bigbit)) & 0x0f; \
2909 }} while (0)
2910
2911 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2912 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2913 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2914 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2915 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2916 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
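/*
 * For example, VFP_SREG_D(insn) yields the single-precision register index
 * Vd:D, i.e. (insn[15:12] << 1) | insn[22], while VFP_DREG_D() yields the
 * double-precision index D:Vd, i.e. (insn[22] << 4) | insn[15:12], when
 * VFP3 (32 double registers) is available.
 */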
2917
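/* Duplicate the low halfword of var into both halves, e.g. 0x1234abcd -> 0xabcdabcd. */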
2918 static void gen_neon_dup_low16(TCGv_i32 var)
2919 {
2920 TCGv_i32 tmp = tcg_temp_new_i32();
2921 tcg_gen_ext16u_i32(var, var);
2922 tcg_gen_shli_i32(tmp, var, 16);
2923 tcg_gen_or_i32(var, var, tmp);
2924 tcg_temp_free_i32(tmp);
2925 }
2926
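/* Duplicate the high halfword of var into both halves, e.g. 0x1234abcd -> 0x12341234. */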
2927 static void gen_neon_dup_high16(TCGv_i32 var)
2928 {
2929 TCGv_i32 tmp = tcg_temp_new_i32();
2930 tcg_gen_andi_i32(var, var, 0xffff0000);
2931 tcg_gen_shri_i32(tmp, var, 16);
2932 tcg_gen_or_i32(var, var, tmp);
2933 tcg_temp_free_i32(tmp);
2934 }
2935
2936 /*
2937 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2938  * (i.e. an undefined instruction).
2939 */
2940 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
2941 {
2942 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
2943 return 1;
2944 }
2945
2946 /*
2947 * If the decodetree decoder handles this insn it will always
2948 * emit code to either execute the insn or generate an appropriate
2949  * exception, so we never need to return non-zero to tell
2950 * the calling code to emit an UNDEF exception.
2951 */
2952 if (extract32(insn, 28, 4) == 0xf) {
2953 if (disas_vfp_uncond(s, insn)) {
2954 return 0;
2955 }
2956 } else {
2957 if (disas_vfp(s, insn)) {
2958 return 0;
2959 }
2960 }
2961 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2962 return 1;
2963 }
2964
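/*
 * Only chain directly (goto_tb) to a destination that lies in the same
 * guest page as the start of this TB or as the current instruction;
 * anything else is resolved through the indirect TB lookup in
 * gen_goto_ptr() below.
 */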
2965 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2966 {
2967 #ifndef CONFIG_USER_ONLY
2968 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2969 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2970 #else
2971 return true;
2972 #endif
2973 }
2974
2975 static void gen_goto_ptr(void)
2976 {
2977 tcg_gen_lookup_and_goto_ptr();
2978 }
2979
2980 /* This will end the TB but doesn't guarantee we'll return to
2981 * cpu_loop_exec. Any live exit_requests will be processed as we
2982 * enter the next TB.
2983 */
2984 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2985 {
2986 if (use_goto_tb(s, dest)) {
2987 tcg_gen_goto_tb(n);
2988 gen_set_pc_im(s, dest);
2989 tcg_gen_exit_tb(s->base.tb, n);
2990 } else {
2991 gen_set_pc_im(s, dest);
2992 gen_goto_ptr();
2993 }
2994 s->base.is_jmp = DISAS_NORETURN;
2995 }
2996
2997 static inline void gen_jmp(DisasContext *s, uint32_t dest)
2998 {
2999 if (unlikely(is_singlestepping(s))) {
3000 /* An indirect jump so that we still trigger the debug exception. */
3001 if (s->thumb)
3002 dest |= 1;
3003 gen_bx_im(s, dest);
3004 } else {
3005 gen_goto_tb(s, 0, dest);
3006 }
3007 }
3008
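/* 16x16->32 signed multiply for the SMULxy/SMLAxy family: x and y select
 * the top (1) or bottom (0) halfword of t0 and t1 respectively. */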
3009 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
3010 {
3011 if (x)
3012 tcg_gen_sari_i32(t0, t0, 16);
3013 else
3014 gen_sxth(t0);
3015 if (y)
3016 tcg_gen_sari_i32(t1, t1, 16);
3017 else
3018 gen_sxth(t1);
3019 tcg_gen_mul_i32(t0, t0, t1);
3020 }
3021
3022 /* Return the mask of PSR bits set by a MSR instruction. */
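/* 'flags' is the instruction's field mask: bit 0 = c (PSR[7:0]),
 * bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]), bit 3 = f (PSR[31:24]). */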
3023 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
3024 {
3025 uint32_t mask;
3026
3027 mask = 0;
3028 if (flags & (1 << 0))
3029 mask |= 0xff;
3030 if (flags & (1 << 1))
3031 mask |= 0xff00;
3032 if (flags & (1 << 2))
3033 mask |= 0xff0000;
3034 if (flags & (1 << 3))
3035 mask |= 0xff000000;
3036
3037 /* Mask out undefined bits. */
3038 mask &= ~CPSR_RESERVED;
3039 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
3040 mask &= ~CPSR_T;
3041 }
3042 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
3043         mask &= ~CPSR_Q; /* V5TE in reality */
3044 }
3045 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
3046 mask &= ~(CPSR_E | CPSR_GE);
3047 }
3048 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
3049 mask &= ~CPSR_IT;
3050 }
3051 /* Mask out execution state and reserved bits. */
3052 if (!spsr) {
3053 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
3054 }
3055 /* Mask out privileged bits. */
3056 if (IS_USER(s))
3057 mask &= CPSR_USER;
3058 return mask;
3059 }
3060
3061 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3062 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3063 {
3064 TCGv_i32 tmp;
3065 if (spsr) {
3066 /* ??? This is also undefined in system mode. */
3067 if (IS_USER(s))
3068 return 1;
3069
3070 tmp = load_cpu_field(spsr);
3071 tcg_gen_andi_i32(tmp, tmp, ~mask);
3072 tcg_gen_andi_i32(t0, t0, mask);
3073 tcg_gen_or_i32(tmp, tmp, t0);
3074 store_cpu_field(tmp, spsr);
3075 } else {
3076 gen_set_cpsr(t0, mask);
3077 }
3078 tcg_temp_free_i32(t0);
3079 gen_lookup_tb(s);
3080 return 0;
3081 }
3082
3083 /* Returns nonzero if access to the PSR is not permitted. */
3084 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3085 {
3086 TCGv_i32 tmp;
3087 tmp = tcg_temp_new_i32();
3088 tcg_gen_movi_i32(tmp, val);
3089 return gen_set_psr(s, mask, spsr, tmp);
3090 }
3091
3092 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
3093 int *tgtmode, int *regno)
3094 {
3095 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3096 * the target mode and register number, and identify the various
3097 * unpredictable cases.
3098 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3099 * + executed in user mode
3100 * + using R15 as the src/dest register
3101 * + accessing an unimplemented register
3102  *  + accessing a register that's inaccessible at the current PL/security state
3103 * + accessing a register that you could access with a different insn
3104 * We choose to UNDEF in all these cases.
3105 * Since we don't know which of the various AArch32 modes we are in
3106 * we have to defer some checks to runtime.
3107 * Accesses to Monitor mode registers from Secure EL1 (which implies
3108 * that EL3 is AArch64) must trap to EL3.
3109 *
3110 * If the access checks fail this function will emit code to take
3111 * an exception and return false. Otherwise it will return true,
3112 * and set *tgtmode and *regno appropriately.
3113 */
3114 int exc_target = default_exception_el(s);
3115
3116 /* These instructions are present only in ARMv8, or in ARMv7 with the
3117 * Virtualization Extensions.
3118 */
3119 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
3120 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3121 goto undef;
3122 }
3123
3124 if (IS_USER(s) || rn == 15) {
3125 goto undef;
3126 }
3127
3128 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3129 * of registers into (r, sysm).
3130 */
3131 if (r) {
3132 /* SPSRs for other modes */
3133 switch (sysm) {
3134 case 0xe: /* SPSR_fiq */
3135 *tgtmode = ARM_CPU_MODE_FIQ;
3136 break;
3137 case 0x10: /* SPSR_irq */
3138 *tgtmode = ARM_CPU_MODE_IRQ;
3139 break;
3140 case 0x12: /* SPSR_svc */
3141 *tgtmode = ARM_CPU_MODE_SVC;
3142 break;
3143 case 0x14: /* SPSR_abt */
3144 *tgtmode = ARM_CPU_MODE_ABT;
3145 break;
3146 case 0x16: /* SPSR_und */
3147 *tgtmode = ARM_CPU_MODE_UND;
3148 break;
3149 case 0x1c: /* SPSR_mon */
3150 *tgtmode = ARM_CPU_MODE_MON;
3151 break;
3152 case 0x1e: /* SPSR_hyp */
3153 *tgtmode = ARM_CPU_MODE_HYP;
3154 break;
3155 default: /* unallocated */
3156 goto undef;
3157 }
3158 /* We arbitrarily assign SPSR a register number of 16. */
3159 *regno = 16;
3160 } else {
3161 /* general purpose registers for other modes */
3162 switch (sysm) {
3163 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3164 *tgtmode = ARM_CPU_MODE_USR;
3165 *regno = sysm + 8;
3166 break;
3167 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3168 *tgtmode = ARM_CPU_MODE_FIQ;
3169 *regno = sysm;
3170 break;
3171 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3172 *tgtmode = ARM_CPU_MODE_IRQ;
3173 *regno = sysm & 1 ? 13 : 14;
3174 break;
3175 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3176 *tgtmode = ARM_CPU_MODE_SVC;
3177 *regno = sysm & 1 ? 13 : 14;
3178 break;
3179 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3180 *tgtmode = ARM_CPU_MODE_ABT;
3181 *regno = sysm & 1 ? 13 : 14;
3182 break;
3183 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3184 *tgtmode = ARM_CPU_MODE_UND;
3185 *regno = sysm & 1 ? 13 : 14;
3186 break;
3187 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3188 *tgtmode = ARM_CPU_MODE_MON;
3189 *regno = sysm & 1 ? 13 : 14;
3190 break;
3191 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */