[qemu.git] / target-arm / translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
31
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34
35 #include "trace-tcg.h"
36 #include "exec/log.h"
37
38
39 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
40 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
41 /* currently all emulated v5 cores are also v5TE, so don't bother */
42 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
43 #define ENABLE_ARCH_5J 0
44 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
45 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
46 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
47 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
48 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
49
50 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
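/* For example, ARCH(6T2) at the top of a decode path makes the insn UNDEF
 * (via the illegal_op label) on cores without the Thumb-2 feature.
 */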
51
52 #include "translate.h"
53
54 #if defined(CONFIG_USER_ONLY)
55 #define IS_USER(s) 1
56 #else
57 #define IS_USER(s) (s->user)
58 #endif
59
60 TCGv_env cpu_env;
61 /* We reuse the same 64-bit temporaries for efficiency. */
62 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
63 static TCGv_i32 cpu_R[16];
64 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65 TCGv_i64 cpu_exclusive_addr;
66 TCGv_i64 cpu_exclusive_val;
67 #ifdef CONFIG_USER_ONLY
68 TCGv_i64 cpu_exclusive_test;
69 TCGv_i32 cpu_exclusive_info;
70 #endif
71
72 /* FIXME: These should be removed. */
73 static TCGv_i32 cpu_F0s, cpu_F1s;
74 static TCGv_i64 cpu_F0d, cpu_F1d;
75
76 #include "exec/gen-icount.h"
77
78 static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
82 /* initialize TCG globals. */
83 void arm_translate_init(void)
84 {
85 int i;
86
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88
89 for (i = 0; i < 16; i++) {
90 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
91 offsetof(CPUARMState, regs[i]),
92 regnames[i]);
93 }
94 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
95 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
96 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
97 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98
99 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
100 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
101 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
102 offsetof(CPUARMState, exclusive_val), "exclusive_val");
103 #ifdef CONFIG_USER_ONLY
104 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
105 offsetof(CPUARMState, exclusive_test), "exclusive_test");
106 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
107 offsetof(CPUARMState, exclusive_info), "exclusive_info");
108 #endif
109
110 a64_translate_init();
111 }
112
113 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
114 {
115 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
116 * insns:
117 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
118 * otherwise, access as if at PL0.
119 */
120 switch (s->mmu_idx) {
121 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
122 case ARMMMUIdx_S12NSE0:
123 case ARMMMUIdx_S12NSE1:
124 return ARMMMUIdx_S12NSE0;
125 case ARMMMUIdx_S1E3:
126 case ARMMMUIdx_S1SE0:
127 case ARMMMUIdx_S1SE1:
128 return ARMMMUIdx_S1SE0;
129 case ARMMMUIdx_S2NS:
130 default:
131 g_assert_not_reached();
132 }
133 }
134
135 static inline TCGv_i32 load_cpu_offset(int offset)
136 {
137 TCGv_i32 tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
140 }
141
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
143
144 static inline void store_cpu_offset(TCGv_i32 var, int offset)
145 {
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
148 }
149
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUARMState, name))
152
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
155 {
156 if (reg == 15) {
157 uint32_t addr;
158 /* r15 reads as the insn address + 8 (ARM) or + 4 (Thumb); s->pc already points past this insn, so add one more insn's length */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
165 tcg_gen_mov_i32(var, cpu_R[reg]);
166 }
167 }
168
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
171 {
172 TCGv_i32 tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
174 return tmp;
175 }
176
177 /* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
180 {
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
184 }
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
187 }
188
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
197
198
199 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
200 {
201 TCGv_i32 tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
204 }
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207
208 static void gen_exception_internal(int excp)
209 {
210 TCGv_i32 tcg_excp = tcg_const_i32(excp);
211
212 assert(excp_is_internal(excp));
213 gen_helper_exception_internal(cpu_env, tcg_excp);
214 tcg_temp_free_i32(tcg_excp);
215 }
216
217 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
218 {
219 TCGv_i32 tcg_excp = tcg_const_i32(excp);
220 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
221 TCGv_i32 tcg_el = tcg_const_i32(target_el);
222
223 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
224 tcg_syn, tcg_el);
225
226 tcg_temp_free_i32(tcg_el);
227 tcg_temp_free_i32(tcg_syn);
228 tcg_temp_free_i32(tcg_excp);
229 }
230
231 static void gen_ss_advance(DisasContext *s)
232 {
233 /* If the singlestep state is Active-not-pending, advance to
234 * Active-pending.
235 */
236 if (s->ss_active) {
237 s->pstate_ss = 0;
238 gen_helper_clear_pstate_ss(cpu_env);
239 }
240 }
241
242 static void gen_step_complete_exception(DisasContext *s)
243 {
244 /* We just completed step of an insn. Move from Active-not-pending
245 * to Active-pending, and then also take the swstep exception.
246 * This corresponds to making the (IMPDEF) choice to prioritize
247 * swstep exceptions over asynchronous exceptions taken to an exception
248 * level where debug is disabled. This choice has the advantage that
249 * we do not need to maintain internal state corresponding to the
250 * ISV/EX syndrome bits between completion of the step and generation
251 * of the exception, and our syndrome information is always correct.
252 */
253 gen_ss_advance(s);
254 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
255 default_exception_el(s));
256 s->is_jmp = DISAS_EXC;
257 }
258
259 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
260 {
261 TCGv_i32 tmp1 = tcg_temp_new_i32();
262 TCGv_i32 tmp2 = tcg_temp_new_i32();
263 tcg_gen_ext16s_i32(tmp1, a);
264 tcg_gen_ext16s_i32(tmp2, b);
265 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
266 tcg_temp_free_i32(tmp2);
267 tcg_gen_sari_i32(a, a, 16);
268 tcg_gen_sari_i32(b, b, 16);
269 tcg_gen_mul_i32(b, b, a);
270 tcg_gen_mov_i32(a, tmp1);
271 tcg_temp_free_i32(tmp1);
272 }
273
274 /* Byteswap each halfword. */
275 static void gen_rev16(TCGv_i32 var)
276 {
277 TCGv_i32 tmp = tcg_temp_new_i32();
278 tcg_gen_shri_i32(tmp, var, 8);
279 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
280 tcg_gen_shli_i32(var, var, 8);
281 tcg_gen_andi_i32(var, var, 0xff00ff00);
282 tcg_gen_or_i32(var, var, tmp);
283 tcg_temp_free_i32(tmp);
284 }
285
286 /* Byteswap low halfword and sign extend. */
287 static void gen_revsh(TCGv_i32 var)
288 {
289 tcg_gen_ext16u_i32(var, var);
290 tcg_gen_bswap16_i32(var, var);
291 tcg_gen_ext16s_i32(var, var);
292 }
293
294 /* Unsigned bitfield extract. */
295 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
296 {
297 if (shift)
298 tcg_gen_shri_i32(var, var, shift);
299 tcg_gen_andi_i32(var, var, mask);
300 }
301
302 /* Signed bitfield extract. */
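/* The xor/sub pair below is the usual sign-extension trick: after masking
   to 'width' bits, (x ^ signbit) - signbit propagates the field's top bit
   upwards, e.g. for width 8, 0x80 becomes 0xffffff80. */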
303 static void gen_sbfx(TCGv_i32 var, int shift, int width)
304 {
305 uint32_t signbit;
306
307 if (shift)
308 tcg_gen_sari_i32(var, var, shift);
309 if (shift + width < 32) {
310 signbit = 1u << (width - 1);
311 tcg_gen_andi_i32(var, var, (1u << width) - 1);
312 tcg_gen_xori_i32(var, var, signbit);
313 tcg_gen_subi_i32(var, var, signbit);
314 }
315 }
316
317 /* Return (b << 32) + a. Mark inputs as dead */
318 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
319 {
320 TCGv_i64 tmp64 = tcg_temp_new_i64();
321
322 tcg_gen_extu_i32_i64(tmp64, b);
323 tcg_temp_free_i32(b);
324 tcg_gen_shli_i64(tmp64, tmp64, 32);
325 tcg_gen_add_i64(a, tmp64, a);
326
327 tcg_temp_free_i64(tmp64);
328 return a;
329 }
330
331 /* Return (b << 32) - a. Mark inputs as dead. */
332 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
333 {
334 TCGv_i64 tmp64 = tcg_temp_new_i64();
335
336 tcg_gen_extu_i32_i64(tmp64, b);
337 tcg_temp_free_i32(b);
338 tcg_gen_shli_i64(tmp64, tmp64, 32);
339 tcg_gen_sub_i64(a, tmp64, a);
340
341 tcg_temp_free_i64(tmp64);
342 return a;
343 }
344
345 /* 32x32->64 multiply. Marks inputs as dead. */
346 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
347 {
348 TCGv_i32 lo = tcg_temp_new_i32();
349 TCGv_i32 hi = tcg_temp_new_i32();
350 TCGv_i64 ret;
351
352 tcg_gen_mulu2_i32(lo, hi, a, b);
353 tcg_temp_free_i32(a);
354 tcg_temp_free_i32(b);
355
356 ret = tcg_temp_new_i64();
357 tcg_gen_concat_i32_i64(ret, lo, hi);
358 tcg_temp_free_i32(lo);
359 tcg_temp_free_i32(hi);
360
361 return ret;
362 }
363
364 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
365 {
366 TCGv_i32 lo = tcg_temp_new_i32();
367 TCGv_i32 hi = tcg_temp_new_i32();
368 TCGv_i64 ret;
369
370 tcg_gen_muls2_i32(lo, hi, a, b);
371 tcg_temp_free_i32(a);
372 tcg_temp_free_i32(b);
373
374 ret = tcg_temp_new_i64();
375 tcg_gen_concat_i32_i64(ret, lo, hi);
376 tcg_temp_free_i32(lo);
377 tcg_temp_free_i32(hi);
378
379 return ret;
380 }
381
382 /* Swap low and high halfwords. */
383 static void gen_swap_half(TCGv_i32 var)
384 {
385 TCGv_i32 tmp = tcg_temp_new_i32();
386 tcg_gen_shri_i32(tmp, var, 16);
387 tcg_gen_shli_i32(var, var, 16);
388 tcg_gen_or_i32(var, var, tmp);
389 tcg_temp_free_i32(tmp);
390 }
391
392 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
393 tmp = (t0 ^ t1) & 0x8000;
394 t0 &= ~0x8000;
395 t1 &= ~0x8000;
396 t0 = (t0 + t1) ^ tmp;
397 */
398
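/* Clearing bit 15 of both operands stops a carry from the low halfword
   spilling into the high halfword; XORing with the saved (t0 ^ t1) & 0x8000
   then reconstructs the correct bit 15 of the result. */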
399 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
400 {
401 TCGv_i32 tmp = tcg_temp_new_i32();
402 tcg_gen_xor_i32(tmp, t0, t1);
403 tcg_gen_andi_i32(tmp, tmp, 0x8000);
404 tcg_gen_andi_i32(t0, t0, ~0x8000);
405 tcg_gen_andi_i32(t1, t1, ~0x8000);
406 tcg_gen_add_i32(t0, t0, t1);
407 tcg_gen_xor_i32(t0, t0, tmp);
408 tcg_temp_free_i32(tmp);
409 tcg_temp_free_i32(t1);
410 }
411
412 /* Set CF to the top bit of var. */
413 static void gen_set_CF_bit31(TCGv_i32 var)
414 {
415 tcg_gen_shri_i32(cpu_CF, var, 31);
416 }
417
418 /* Set N and Z flags from var. */
419 static inline void gen_logic_CC(TCGv_i32 var)
420 {
421 tcg_gen_mov_i32(cpu_NF, var);
422 tcg_gen_mov_i32(cpu_ZF, var);
423 }
424
425 /* T0 += T1 + CF. */
426 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
427 {
428 tcg_gen_add_i32(t0, t0, t1);
429 tcg_gen_add_i32(t0, t0, cpu_CF);
430 }
431
432 /* dest = T0 + T1 + CF. */
433 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
434 {
435 tcg_gen_add_i32(dest, t0, t1);
436 tcg_gen_add_i32(dest, dest, cpu_CF);
437 }
438
439 /* dest = T0 - T1 + CF - 1. */
440 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
441 {
442 tcg_gen_sub_i32(dest, t0, t1);
443 tcg_gen_add_i32(dest, dest, cpu_CF);
444 tcg_gen_subi_i32(dest, dest, 1);
445 }
446
447 /* dest = T0 + T1. Compute C, N, V and Z flags */
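/* C is the carry out of bit 31, taken from the high half of the double-word
   add2; V is the sign bit of (result ^ t0) & ~(t0 ^ t1), i.e. set when both
   operands have the same sign but the result's sign differs. */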
448 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
449 {
450 TCGv_i32 tmp = tcg_temp_new_i32();
451 tcg_gen_movi_i32(tmp, 0);
452 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
453 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
454 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
455 tcg_gen_xor_i32(tmp, t0, t1);
456 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
457 tcg_temp_free_i32(tmp);
458 tcg_gen_mov_i32(dest, cpu_NF);
459 }
460
461 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
462 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
463 {
464 TCGv_i32 tmp = tcg_temp_new_i32();
465 if (TCG_TARGET_HAS_add2_i32) {
466 tcg_gen_movi_i32(tmp, 0);
467 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
468 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
469 } else {
470 TCGv_i64 q0 = tcg_temp_new_i64();
471 TCGv_i64 q1 = tcg_temp_new_i64();
472 tcg_gen_extu_i32_i64(q0, t0);
473 tcg_gen_extu_i32_i64(q1, t1);
474 tcg_gen_add_i64(q0, q0, q1);
475 tcg_gen_extu_i32_i64(q1, cpu_CF);
476 tcg_gen_add_i64(q0, q0, q1);
477 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
478 tcg_temp_free_i64(q0);
479 tcg_temp_free_i64(q1);
480 }
481 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
482 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
483 tcg_gen_xor_i32(tmp, t0, t1);
484 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
485 tcg_temp_free_i32(tmp);
486 tcg_gen_mov_i32(dest, cpu_NF);
487 }
488
489 /* dest = T0 - T1. Compute C, N, V and Z flags */
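/* For subtraction the ARM C flag is NOT(borrow), i.e. C = (t0 >= t1)
   unsigned; V is the sign bit of (result ^ t0) & (t0 ^ t1). */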
490 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
491 {
492 TCGv_i32 tmp;
493 tcg_gen_sub_i32(cpu_NF, t0, t1);
494 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
495 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
496 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
497 tmp = tcg_temp_new_i32();
498 tcg_gen_xor_i32(tmp, t0, t1);
499 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
500 tcg_temp_free_i32(tmp);
501 tcg_gen_mov_i32(dest, cpu_NF);
502 }
503
504 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
505 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
506 {
507 TCGv_i32 tmp = tcg_temp_new_i32();
508 tcg_gen_not_i32(tmp, t1);
509 gen_adc_CC(dest, t0, tmp);
510 tcg_temp_free_i32(tmp);
511 }
512
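/* ARM shifts by register use only the bottom byte of the count, and LSL/LSR
 * by 32..255 yield zero.  The movcond below substitutes a zero source when
 * the byte-masked count exceeds 31, then shifts by count & 0x1f.
 */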
513 #define GEN_SHIFT(name) \
514 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
515 { \
516 TCGv_i32 tmp1, tmp2, tmp3; \
517 tmp1 = tcg_temp_new_i32(); \
518 tcg_gen_andi_i32(tmp1, t1, 0xff); \
519 tmp2 = tcg_const_i32(0); \
520 tmp3 = tcg_const_i32(0x1f); \
521 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
522 tcg_temp_free_i32(tmp3); \
523 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
524 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
525 tcg_temp_free_i32(tmp2); \
526 tcg_temp_free_i32(tmp1); \
527 }
528 GEN_SHIFT(shl)
529 GEN_SHIFT(shr)
530 #undef GEN_SHIFT
531
532 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
533 {
534 TCGv_i32 tmp1, tmp2;
535 tmp1 = tcg_temp_new_i32();
536 tcg_gen_andi_i32(tmp1, t1, 0xff);
537 tmp2 = tcg_const_i32(0x1f);
538 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
539 tcg_temp_free_i32(tmp2);
540 tcg_gen_sar_i32(dest, t0, tmp1);
541 tcg_temp_free_i32(tmp1);
542 }
543
544 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
545 {
546 TCGv_i32 c0 = tcg_const_i32(0);
547 TCGv_i32 tmp = tcg_temp_new_i32();
548 tcg_gen_neg_i32(tmp, src);
549 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
550 tcg_temp_free_i32(c0);
551 tcg_temp_free_i32(tmp);
552 }
553
554 static void shifter_out_im(TCGv_i32 var, int shift)
555 {
556 if (shift == 0) {
557 tcg_gen_andi_i32(cpu_CF, var, 1);
558 } else {
559 tcg_gen_shri_i32(cpu_CF, var, shift);
560 if (shift != 31) {
561 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
562 }
563 }
564 }
565
566 /* Shift by immediate. Includes special handling for shift == 0. */
567 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
568 int shift, int flags)
569 {
570 switch (shiftop) {
571 case 0: /* LSL */
572 if (shift != 0) {
573 if (flags)
574 shifter_out_im(var, 32 - shift);
575 tcg_gen_shli_i32(var, var, shift);
576 }
577 break;
578 case 1: /* LSR */
579 if (shift == 0) {
580 if (flags) {
581 tcg_gen_shri_i32(cpu_CF, var, 31);
582 }
583 tcg_gen_movi_i32(var, 0);
584 } else {
585 if (flags)
586 shifter_out_im(var, shift - 1);
587 tcg_gen_shri_i32(var, var, shift);
588 }
589 break;
590 case 2: /* ASR */
591 if (shift == 0)
592 shift = 32;
593 if (flags)
594 shifter_out_im(var, shift - 1);
595 if (shift == 32)
596 shift = 31;
597 tcg_gen_sari_i32(var, var, shift);
598 break;
599 case 3: /* ROR/RRX */
600 if (shift != 0) {
601 if (flags)
602 shifter_out_im(var, shift - 1);
603 tcg_gen_rotri_i32(var, var, shift); break;
604 } else {
605 TCGv_i32 tmp = tcg_temp_new_i32();
606 tcg_gen_shli_i32(tmp, cpu_CF, 31);
607 if (flags)
608 shifter_out_im(var, 0);
609 tcg_gen_shri_i32(var, var, 1);
610 tcg_gen_or_i32(var, var, tmp);
611 tcg_temp_free_i32(tmp);
612 }
613 }
614 }
615
616 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
617 TCGv_i32 shift, int flags)
618 {
619 if (flags) {
620 switch (shiftop) {
621 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
622 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
623 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
624 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
625 }
626 } else {
627 switch (shiftop) {
628 case 0:
629 gen_shl(var, var, shift);
630 break;
631 case 1:
632 gen_shr(var, var, shift);
633 break;
634 case 2:
635 gen_sar(var, var, shift);
636 break;
637 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
638 tcg_gen_rotr_i32(var, var, shift); break;
639 }
640 }
641 tcg_temp_free_i32(shift);
642 }
643
644 #define PAS_OP(pfx) \
645 switch (op2) { \
646 case 0: gen_pas_helper(glue(pfx,add16)); break; \
647 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
648 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
649 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
650 case 4: gen_pas_helper(glue(pfx,add8)); break; \
651 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
652 }
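/* In gen_arm_parallel_addsub below, op1 selects the variant: s = signed
 * (sets the GE flags), q = signed saturating, sh = signed halving,
 * u = unsigned (sets the GE flags), uq = unsigned saturating,
 * uh = unsigned halving.
 */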
653 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
654 {
655 TCGv_ptr tmp;
656
657 switch (op1) {
658 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
659 case 1:
660 tmp = tcg_temp_new_ptr();
661 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
662 PAS_OP(s)
663 tcg_temp_free_ptr(tmp);
664 break;
665 case 5:
666 tmp = tcg_temp_new_ptr();
667 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
668 PAS_OP(u)
669 tcg_temp_free_ptr(tmp);
670 break;
671 #undef gen_pas_helper
672 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
673 case 2:
674 PAS_OP(q);
675 break;
676 case 3:
677 PAS_OP(sh);
678 break;
679 case 6:
680 PAS_OP(uq);
681 break;
682 case 7:
683 PAS_OP(uh);
684 break;
685 #undef gen_pas_helper
686 }
687 }
688 #undef PAS_OP
689
690 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
691 #define PAS_OP(pfx) \
692 switch (op1) { \
693 case 0: gen_pas_helper(glue(pfx,add8)); break; \
694 case 1: gen_pas_helper(glue(pfx,add16)); break; \
695 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
696 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
697 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
698 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
699 }
700 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
701 {
702 TCGv_ptr tmp;
703
704 switch (op2) {
705 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
706 case 0:
707 tmp = tcg_temp_new_ptr();
708 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
709 PAS_OP(s)
710 tcg_temp_free_ptr(tmp);
711 break;
712 case 4:
713 tmp = tcg_temp_new_ptr();
714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
715 PAS_OP(u)
716 tcg_temp_free_ptr(tmp);
717 break;
718 #undef gen_pas_helper
719 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
720 case 1:
721 PAS_OP(q);
722 break;
723 case 2:
724 PAS_OP(sh);
725 break;
726 case 5:
727 PAS_OP(uq);
728 break;
729 case 6:
730 PAS_OP(uh);
731 break;
732 #undef gen_pas_helper
733 }
734 }
735 #undef PAS_OP
736
737 /*
738 * Generate a conditional based on ARM condition code cc.
739 * This is common between ARM and Aarch64 targets.
740 */
741 void arm_test_cc(DisasCompare *cmp, int cc)
742 {
743 TCGv_i32 value;
744 TCGCond cond;
745 bool global = true;
746
747 switch (cc) {
748 case 0: /* eq: Z */
749 case 1: /* ne: !Z */
750 cond = TCG_COND_EQ;
751 value = cpu_ZF;
752 break;
753
754 case 2: /* cs: C */
755 case 3: /* cc: !C */
756 cond = TCG_COND_NE;
757 value = cpu_CF;
758 break;
759
760 case 4: /* mi: N */
761 case 5: /* pl: !N */
762 cond = TCG_COND_LT;
763 value = cpu_NF;
764 break;
765
766 case 6: /* vs: V */
767 case 7: /* vc: !V */
768 cond = TCG_COND_LT;
769 value = cpu_VF;
770 break;
771
772 case 8: /* hi: C && !Z */
773 case 9: /* ls: !C || Z -> !(C && !Z) */
774 cond = TCG_COND_NE;
775 value = tcg_temp_new_i32();
776 global = false;
777 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
778 ZF is non-zero for !Z; so AND the two subexpressions. */
779 tcg_gen_neg_i32(value, cpu_CF);
780 tcg_gen_and_i32(value, value, cpu_ZF);
781 break;
782
783 case 10: /* ge: N == V -> N ^ V == 0 */
784 case 11: /* lt: N != V -> N ^ V != 0 */
785 /* Since we're only interested in the sign bit, == 0 is >= 0. */
786 cond = TCG_COND_GE;
787 value = tcg_temp_new_i32();
788 global = false;
789 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
790 break;
791
792 case 12: /* gt: !Z && N == V */
793 case 13: /* le: Z || N != V */
794 cond = TCG_COND_NE;
795 value = tcg_temp_new_i32();
796 global = false;
797 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
798 * the sign bit then AND with ZF to yield the result. */
799 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
800 tcg_gen_sari_i32(value, value, 31);
801 tcg_gen_andc_i32(value, cpu_ZF, value);
802 break;
803
804 case 14: /* always */
805 case 15: /* always */
806 /* Use the ALWAYS condition, which will fold early.
807 * It doesn't matter what we use for the value. */
808 cond = TCG_COND_ALWAYS;
809 value = cpu_ZF;
810 goto no_invert;
811
812 default:
813 fprintf(stderr, "Bad condition code 0x%x\n", cc);
814 abort();
815 }
816
817 if (cc & 1) {
818 cond = tcg_invert_cond(cond);
819 }
820
821 no_invert:
822 cmp->cond = cond;
823 cmp->value = value;
824 cmp->value_global = global;
825 }
826
827 void arm_free_cc(DisasCompare *cmp)
828 {
829 if (!cmp->value_global) {
830 tcg_temp_free_i32(cmp->value);
831 }
832 }
833
834 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
835 {
836 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
837 }
838
839 void arm_gen_test_cc(int cc, TCGLabel *label)
840 {
841 DisasCompare cmp;
842 arm_test_cc(&cmp, cc);
843 arm_jump_cc(&cmp, label);
844 arm_free_cc(&cmp);
845 }
846
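/* Indexed by data-processing opcode: nonzero means the op is "logical", so
   its S form sets only N and Z from the result (with C coming from the
   shifter) rather than computing full arithmetic flags. */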
847 static const uint8_t table_logic_cc[16] = {
848 1, /* and */
849 1, /* xor */
850 0, /* sub */
851 0, /* rsb */
852 0, /* add */
853 0, /* adc */
854 0, /* sbc */
855 0, /* rsc */
856 1, /* andl */
857 1, /* xorl */
858 0, /* cmp */
859 0, /* cmn */
860 1, /* orr */
861 1, /* mov */
862 1, /* bic */
863 1, /* mvn */
864 };
865
866 /* Set PC and Thumb state from an immediate address. */
867 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
868 {
869 TCGv_i32 tmp;
870
871 s->is_jmp = DISAS_JUMP;
872 if (s->thumb != (addr & 1)) {
873 tmp = tcg_temp_new_i32();
874 tcg_gen_movi_i32(tmp, addr & 1);
875 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
876 tcg_temp_free_i32(tmp);
877 }
878 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
879 }
880
881 /* Set PC and Thumb state from var. var is marked as dead. */
882 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
883 {
884 s->is_jmp = DISAS_JUMP;
885 tcg_gen_andi_i32(cpu_R[15], var, ~1);
886 tcg_gen_andi_i32(var, var, 1);
887 store_cpu_field(var, thumb);
888 }
889
890 /* Variant of store_reg which uses branch&exchange logic when storing
891 to r15 in ARM architecture v7 and above. The source must be a temporary
892 and will be marked as dead. */
893 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
894 {
895 if (reg == 15 && ENABLE_ARCH_7) {
896 gen_bx(s, var);
897 } else {
898 store_reg(s, reg, var);
899 }
900 }
901
902 /* Variant of store_reg which uses branch&exchange logic when storing
903 * to r15 in ARM architecture v5T and above. This is used for storing
904 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
905 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
906 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
907 {
908 if (reg == 15 && ENABLE_ARCH_5) {
909 gen_bx(s, var);
910 } else {
911 store_reg(s, reg, var);
912 }
913 }
914
915 #ifdef CONFIG_USER_ONLY
916 #define IS_USER_ONLY 1
917 #else
918 #define IS_USER_ONLY 0
919 #endif
920
921 /* Abstractions of "generate code to do a guest load/store for
922 * AArch32", where a vaddr is always 32 bits (and is zero
923 * extended if we're a 64 bit core) and data is also
924 * 32 bits unless specifically doing a 64 bit access.
925 * These functions work like tcg_gen_qemu_{ld,st}* except
926 * that the address argument is TCGv_i32 rather than TCGv.
927 */
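/* The BE32_XOR argument handles the legacy BE32 (SCTLR.B) configuration in
 * system emulation: byte accesses XOR the address with 3 and halfword
 * accesses with 2, which combined with a little-endian access reproduces
 * the BE32 byte lane ordering.
 */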
928 #if TARGET_LONG_BITS == 32
929
930 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
931 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
932 TCGv_i32 addr, int index) \
933 { \
934 TCGMemOp opc = (OPC) | s->be_data; \
935 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
936 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
937 TCGv addr_be = tcg_temp_new(); \
938 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
939 tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
940 tcg_temp_free(addr_be); \
941 return; \
942 } \
943 tcg_gen_qemu_ld_i32(val, addr, index, opc); \
944 }
945
946 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
947 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
948 TCGv_i32 addr, int index) \
949 { \
950 TCGMemOp opc = (OPC) | s->be_data; \
951 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
952 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
953 TCGv addr_be = tcg_temp_new(); \
954 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
955 tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
956 tcg_temp_free(addr_be); \
957 return; \
958 } \
959 tcg_gen_qemu_st_i32(val, addr, index, opc); \
960 }
961
962 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
963 TCGv_i32 addr, int index)
964 {
965 TCGMemOp opc = MO_Q | s->be_data;
966 tcg_gen_qemu_ld_i64(val, addr, index, opc);
967 /* Not needed for user-mode BE32, where we use MO_BE instead. */
968 if (!IS_USER_ONLY && s->sctlr_b) {
969 tcg_gen_rotri_i64(val, val, 32);
970 }
971 }
972
973 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
974 TCGv_i32 addr, int index)
975 {
976 TCGMemOp opc = MO_Q | s->be_data;
977 /* Not needed for user-mode BE32, where we use MO_BE instead. */
978 if (!IS_USER_ONLY && s->sctlr_b) {
979 TCGv_i64 tmp = tcg_temp_new_i64();
980 tcg_gen_rotri_i64(tmp, val, 32);
981 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
982 tcg_temp_free_i64(tmp);
983 return;
984 }
985 tcg_gen_qemu_st_i64(val, addr, index, opc);
986 }
987
988 #else
989
990 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
991 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
992 TCGv_i32 addr, int index) \
993 { \
994 TCGMemOp opc = (OPC) | s->be_data; \
995 TCGv addr64 = tcg_temp_new(); \
996 tcg_gen_extu_i32_i64(addr64, addr); \
997 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
998 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
999 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1000 } \
1001 tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
1002 tcg_temp_free(addr64); \
1003 }
1004
1005 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
1006 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1007 TCGv_i32 addr, int index) \
1008 { \
1009 TCGMemOp opc = (OPC) | s->be_data; \
1010 TCGv addr64 = tcg_temp_new(); \
1011 tcg_gen_extu_i32_i64(addr64, addr); \
1012 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1013 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1014 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1015 } \
1016 tcg_gen_qemu_st_i32(val, addr64, index, opc); \
1017 tcg_temp_free(addr64); \
1018 }
1019
1020 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1021 TCGv_i32 addr, int index)
1022 {
1023 TCGMemOp opc = MO_Q | s->be_data;
1024 TCGv addr64 = tcg_temp_new();
1025 tcg_gen_extu_i32_i64(addr64, addr);
1026 tcg_gen_qemu_ld_i64(val, addr64, index, opc);
1027
1028 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1029 if (!IS_USER_ONLY && s->sctlr_b) {
1030 tcg_gen_rotri_i64(val, val, 32);
1031 }
1032 tcg_temp_free(addr64);
1033 }
1034
1035 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1036 TCGv_i32 addr, int index)
1037 {
1038 TCGMemOp opc = MO_Q | s->be_data;
1039 TCGv addr64 = tcg_temp_new();
1040 tcg_gen_extu_i32_i64(addr64, addr);
1041
1042 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1043 if (!IS_USER_ONLY && s->sctlr_b) {
1044 TCGv tmp = tcg_temp_new();
1045 tcg_gen_rotri_i64(tmp, val, 32);
1046 tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
1047 tcg_temp_free(tmp);
1048 } else {
1049 tcg_gen_qemu_st_i64(val, addr64, index, opc);
1050 }
1051 tcg_temp_free(addr64);
1052 }
1053
1054 #endif
1055
1056 DO_GEN_LD(8s, MO_SB, 3)
1057 DO_GEN_LD(8u, MO_UB, 3)
1058 DO_GEN_LD(16s, MO_SW, 2)
1059 DO_GEN_LD(16u, MO_UW, 2)
1060 DO_GEN_LD(32u, MO_UL, 0)
1061 /* 'a' variants include an alignment check */
1062 DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
1063 DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
1064 DO_GEN_ST(8, MO_UB, 3)
1065 DO_GEN_ST(16, MO_UW, 2)
1066 DO_GEN_ST(32, MO_UL, 0)
1067
1068 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
1069 {
1070 tcg_gen_movi_i32(cpu_R[15], val);
1071 }
1072
1073 static inline void gen_hvc(DisasContext *s, int imm16)
1074 {
1075 /* The pre HVC helper handles cases when HVC gets trapped
1076 * as an undefined insn by runtime configuration (i.e. before
1077 * the insn really executes).
1078 */
1079 gen_set_pc_im(s, s->pc - 4);
1080 gen_helper_pre_hvc(cpu_env);
1081 /* Otherwise we will treat this as a real exception which
1082 * happens after execution of the insn. (The distinction matters
1083 * for the PC value reported to the exception handler and also
1084 * for single stepping.)
1085 */
1086 s->svc_imm = imm16;
1087 gen_set_pc_im(s, s->pc);
1088 s->is_jmp = DISAS_HVC;
1089 }
1090
1091 static inline void gen_smc(DisasContext *s)
1092 {
1093 /* As with HVC, we may take an exception either before or after
1094 * the insn executes.
1095 */
1096 TCGv_i32 tmp;
1097
1098 gen_set_pc_im(s, s->pc - 4);
1099 tmp = tcg_const_i32(syn_aa32_smc());
1100 gen_helper_pre_smc(cpu_env, tmp);
1101 tcg_temp_free_i32(tmp);
1102 gen_set_pc_im(s, s->pc);
1103 s->is_jmp = DISAS_SMC;
1104 }
1105
1106 static inline void
1107 gen_set_condexec (DisasContext *s)
1108 {
1109 if (s->condexec_mask) {
1110 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1111 TCGv_i32 tmp = tcg_temp_new_i32();
1112 tcg_gen_movi_i32(tmp, val);
1113 store_cpu_field(tmp, condexec_bits);
1114 }
1115 }
1116
1117 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1118 {
1119 gen_set_condexec(s);
1120 gen_set_pc_im(s, s->pc - offset);
1121 gen_exception_internal(excp);
1122 s->is_jmp = DISAS_JUMP;
1123 }
1124
1125 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1126 int syn, uint32_t target_el)
1127 {
1128 gen_set_condexec(s);
1129 gen_set_pc_im(s, s->pc - offset);
1130 gen_exception(excp, syn, target_el);
1131 s->is_jmp = DISAS_JUMP;
1132 }
1133
1134 /* Force a TB lookup after an instruction that changes the CPU state. */
1135 static inline void gen_lookup_tb(DisasContext *s)
1136 {
1137 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1138 s->is_jmp = DISAS_JUMP;
1139 }
1140
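/* Add the addressing-mode-2 offset of a load/store insn to 'var': either a
   12-bit immediate or a (possibly shifted) register, added or subtracted
   according to the U bit (bit 23). */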
1141 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1142 TCGv_i32 var)
1143 {
1144 int val, rm, shift, shiftop;
1145 TCGv_i32 offset;
1146
1147 if (!(insn & (1 << 25))) {
1148 /* immediate */
1149 val = insn & 0xfff;
1150 if (!(insn & (1 << 23)))
1151 val = -val;
1152 if (val != 0)
1153 tcg_gen_addi_i32(var, var, val);
1154 } else {
1155 /* shift/register */
1156 rm = (insn) & 0xf;
1157 shift = (insn >> 7) & 0x1f;
1158 shiftop = (insn >> 5) & 3;
1159 offset = load_reg(s, rm);
1160 gen_arm_shift_im(offset, shiftop, shift, 0);
1161 if (!(insn & (1 << 23)))
1162 tcg_gen_sub_i32(var, var, offset);
1163 else
1164 tcg_gen_add_i32(var, var, offset);
1165 tcg_temp_free_i32(offset);
1166 }
1167 }
1168
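/* As above, but for the misc (addressing mode 3) loads/stores: an 8-bit
   split immediate or a plain register offset, plus an 'extra' adjustment
   supplied by the caller for the doubleword forms. */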
1169 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1170 int extra, TCGv_i32 var)
1171 {
1172 int val, rm;
1173 TCGv_i32 offset;
1174
1175 if (insn & (1 << 22)) {
1176 /* immediate */
1177 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1178 if (!(insn & (1 << 23)))
1179 val = -val;
1180 val += extra;
1181 if (val != 0)
1182 tcg_gen_addi_i32(var, var, val);
1183 } else {
1184 /* register */
1185 if (extra)
1186 tcg_gen_addi_i32(var, var, extra);
1187 rm = (insn) & 0xf;
1188 offset = load_reg(s, rm);
1189 if (!(insn & (1 << 23)))
1190 tcg_gen_sub_i32(var, var, offset);
1191 else
1192 tcg_gen_add_i32(var, var, offset);
1193 tcg_temp_free_i32(offset);
1194 }
1195 }
1196
1197 static TCGv_ptr get_fpstatus_ptr(int neon)
1198 {
1199 TCGv_ptr statusptr = tcg_temp_new_ptr();
1200 int offset;
1201 if (neon) {
1202 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1203 } else {
1204 offset = offsetof(CPUARMState, vfp.fp_status);
1205 }
1206 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1207 return statusptr;
1208 }
1209
1210 #define VFP_OP2(name) \
1211 static inline void gen_vfp_##name(int dp) \
1212 { \
1213 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1214 if (dp) { \
1215 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1216 } else { \
1217 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1218 } \
1219 tcg_temp_free_ptr(fpst); \
1220 }
1221
1222 VFP_OP2(add)
1223 VFP_OP2(sub)
1224 VFP_OP2(mul)
1225 VFP_OP2(div)
1226
1227 #undef VFP_OP2
1228
1229 static inline void gen_vfp_F1_mul(int dp)
1230 {
1231 /* Like gen_vfp_mul() but put result in F1 */
1232 TCGv_ptr fpst = get_fpstatus_ptr(0);
1233 if (dp) {
1234 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1235 } else {
1236 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1237 }
1238 tcg_temp_free_ptr(fpst);
1239 }
1240
1241 static inline void gen_vfp_F1_neg(int dp)
1242 {
1243 /* Like gen_vfp_neg() but put result in F1 */
1244 if (dp) {
1245 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1246 } else {
1247 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1248 }
1249 }
1250
1251 static inline void gen_vfp_abs(int dp)
1252 {
1253 if (dp)
1254 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1255 else
1256 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1257 }
1258
1259 static inline void gen_vfp_neg(int dp)
1260 {
1261 if (dp)
1262 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1263 else
1264 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1265 }
1266
1267 static inline void gen_vfp_sqrt(int dp)
1268 {
1269 if (dp)
1270 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1271 else
1272 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1273 }
1274
1275 static inline void gen_vfp_cmp(int dp)
1276 {
1277 if (dp)
1278 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1279 else
1280 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1281 }
1282
1283 static inline void gen_vfp_cmpe(int dp)
1284 {
1285 if (dp)
1286 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1287 else
1288 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1289 }
1290
1291 static inline void gen_vfp_F1_ld0(int dp)
1292 {
1293 if (dp)
1294 tcg_gen_movi_i64(cpu_F1d, 0);
1295 else
1296 tcg_gen_movi_i32(cpu_F1s, 0);
1297 }
1298
1299 #define VFP_GEN_ITOF(name) \
1300 static inline void gen_vfp_##name(int dp, int neon) \
1301 { \
1302 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1303 if (dp) { \
1304 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1305 } else { \
1306 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1307 } \
1308 tcg_temp_free_ptr(statusptr); \
1309 }
1310
1311 VFP_GEN_ITOF(uito)
1312 VFP_GEN_ITOF(sito)
1313 #undef VFP_GEN_ITOF
1314
1315 #define VFP_GEN_FTOI(name) \
1316 static inline void gen_vfp_##name(int dp, int neon) \
1317 { \
1318 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1319 if (dp) { \
1320 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1321 } else { \
1322 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1323 } \
1324 tcg_temp_free_ptr(statusptr); \
1325 }
1326
1327 VFP_GEN_FTOI(toui)
1328 VFP_GEN_FTOI(touiz)
1329 VFP_GEN_FTOI(tosi)
1330 VFP_GEN_FTOI(tosiz)
1331 #undef VFP_GEN_FTOI
1332
1333 #define VFP_GEN_FIX(name, round) \
1334 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1335 { \
1336 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1337 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1338 if (dp) { \
1339 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1340 statusptr); \
1341 } else { \
1342 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1343 statusptr); \
1344 } \
1345 tcg_temp_free_i32(tmp_shift); \
1346 tcg_temp_free_ptr(statusptr); \
1347 }
1348 VFP_GEN_FIX(tosh, _round_to_zero)
1349 VFP_GEN_FIX(tosl, _round_to_zero)
1350 VFP_GEN_FIX(touh, _round_to_zero)
1351 VFP_GEN_FIX(toul, _round_to_zero)
1352 VFP_GEN_FIX(shto, )
1353 VFP_GEN_FIX(slto, )
1354 VFP_GEN_FIX(uhto, )
1355 VFP_GEN_FIX(ulto, )
1356 #undef VFP_GEN_FIX
1357
1358 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1359 {
1360 if (dp) {
1361 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1362 } else {
1363 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1364 }
1365 }
1366
1367 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1368 {
1369 if (dp) {
1370 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1371 } else {
1372 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1373 }
1374 }
1375
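/* Byte offset within CPUARMState of VFP register 'reg': a D register when
   'dp' is set, otherwise an S register stored as the low or high half of
   the underlying CPU_DoubleU. */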
1376 static inline long
1377 vfp_reg_offset (int dp, int reg)
1378 {
1379 if (dp)
1380 return offsetof(CPUARMState, vfp.regs[reg]);
1381 else if (reg & 1) {
1382 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1383 + offsetof(CPU_DoubleU, l.upper);
1384 } else {
1385 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1386 + offsetof(CPU_DoubleU, l.lower);
1387 }
1388 }
1389
1390 /* Return the offset of a 32-bit piece of a NEON register.
1391 zero is the least significant end of the register. */
1392 static inline long
1393 neon_reg_offset (int reg, int n)
1394 {
1395 int sreg;
1396 sreg = reg * 2 + n;
1397 return vfp_reg_offset(0, sreg);
1398 }
1399
1400 static TCGv_i32 neon_load_reg(int reg, int pass)
1401 {
1402 TCGv_i32 tmp = tcg_temp_new_i32();
1403 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1404 return tmp;
1405 }
1406
1407 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1408 {
1409 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1410 tcg_temp_free_i32(var);
1411 }
1412
1413 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1414 {
1415 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1416 }
1417
1418 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1419 {
1420 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1421 }
1422
1423 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1424 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1425 #define tcg_gen_st_f32 tcg_gen_st_i32
1426 #define tcg_gen_st_f64 tcg_gen_st_i64
1427
1428 static inline void gen_mov_F0_vreg(int dp, int reg)
1429 {
1430 if (dp)
1431 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1432 else
1433 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1434 }
1435
1436 static inline void gen_mov_F1_vreg(int dp, int reg)
1437 {
1438 if (dp)
1439 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1440 else
1441 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1442 }
1443
1444 static inline void gen_mov_vreg_F0(int dp, int reg)
1445 {
1446 if (dp)
1447 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1448 else
1449 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1450 }
1451
1452 #define ARM_CP_RW_BIT (1 << 20)
1453
1454 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1455 {
1456 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1457 }
1458
1459 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1460 {
1461 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1462 }
1463
1464 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1465 {
1466 TCGv_i32 var = tcg_temp_new_i32();
1467 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1468 return var;
1469 }
1470
1471 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1472 {
1473 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1474 tcg_temp_free_i32(var);
1475 }
1476
1477 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1478 {
1479 iwmmxt_store_reg(cpu_M0, rn);
1480 }
1481
1482 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1483 {
1484 iwmmxt_load_reg(cpu_M0, rn);
1485 }
1486
1487 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1488 {
1489 iwmmxt_load_reg(cpu_V1, rn);
1490 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1491 }
1492
1493 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1494 {
1495 iwmmxt_load_reg(cpu_V1, rn);
1496 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1497 }
1498
1499 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1500 {
1501 iwmmxt_load_reg(cpu_V1, rn);
1502 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1503 }
1504
1505 #define IWMMXT_OP(name) \
1506 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1507 { \
1508 iwmmxt_load_reg(cpu_V1, rn); \
1509 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1510 }
1511
1512 #define IWMMXT_OP_ENV(name) \
1513 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1514 { \
1515 iwmmxt_load_reg(cpu_V1, rn); \
1516 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1517 }
1518
1519 #define IWMMXT_OP_ENV_SIZE(name) \
1520 IWMMXT_OP_ENV(name##b) \
1521 IWMMXT_OP_ENV(name##w) \
1522 IWMMXT_OP_ENV(name##l)
1523
1524 #define IWMMXT_OP_ENV1(name) \
1525 static inline void gen_op_iwmmxt_##name##_M0(void) \
1526 { \
1527 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1528 }
1529
1530 IWMMXT_OP(maddsq)
1531 IWMMXT_OP(madduq)
1532 IWMMXT_OP(sadb)
1533 IWMMXT_OP(sadw)
1534 IWMMXT_OP(mulslw)
1535 IWMMXT_OP(mulshw)
1536 IWMMXT_OP(mululw)
1537 IWMMXT_OP(muluhw)
1538 IWMMXT_OP(macsw)
1539 IWMMXT_OP(macuw)
1540
1541 IWMMXT_OP_ENV_SIZE(unpackl)
1542 IWMMXT_OP_ENV_SIZE(unpackh)
1543
1544 IWMMXT_OP_ENV1(unpacklub)
1545 IWMMXT_OP_ENV1(unpackluw)
1546 IWMMXT_OP_ENV1(unpacklul)
1547 IWMMXT_OP_ENV1(unpackhub)
1548 IWMMXT_OP_ENV1(unpackhuw)
1549 IWMMXT_OP_ENV1(unpackhul)
1550 IWMMXT_OP_ENV1(unpacklsb)
1551 IWMMXT_OP_ENV1(unpacklsw)
1552 IWMMXT_OP_ENV1(unpacklsl)
1553 IWMMXT_OP_ENV1(unpackhsb)
1554 IWMMXT_OP_ENV1(unpackhsw)
1555 IWMMXT_OP_ENV1(unpackhsl)
1556
1557 IWMMXT_OP_ENV_SIZE(cmpeq)
1558 IWMMXT_OP_ENV_SIZE(cmpgtu)
1559 IWMMXT_OP_ENV_SIZE(cmpgts)
1560
1561 IWMMXT_OP_ENV_SIZE(mins)
1562 IWMMXT_OP_ENV_SIZE(minu)
1563 IWMMXT_OP_ENV_SIZE(maxs)
1564 IWMMXT_OP_ENV_SIZE(maxu)
1565
1566 IWMMXT_OP_ENV_SIZE(subn)
1567 IWMMXT_OP_ENV_SIZE(addn)
1568 IWMMXT_OP_ENV_SIZE(subu)
1569 IWMMXT_OP_ENV_SIZE(addu)
1570 IWMMXT_OP_ENV_SIZE(subs)
1571 IWMMXT_OP_ENV_SIZE(adds)
1572
1573 IWMMXT_OP_ENV(avgb0)
1574 IWMMXT_OP_ENV(avgb1)
1575 IWMMXT_OP_ENV(avgw0)
1576 IWMMXT_OP_ENV(avgw1)
1577
1578 IWMMXT_OP_ENV(packuw)
1579 IWMMXT_OP_ENV(packul)
1580 IWMMXT_OP_ENV(packuq)
1581 IWMMXT_OP_ENV(packsw)
1582 IWMMXT_OP_ENV(packsl)
1583 IWMMXT_OP_ENV(packsq)
1584
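/* The next two helpers OR bit 1 resp. bit 0 into wCon to record that a main
   (wRn) or control (wCx) iwMMXt register was updated (the MUP and CUP bits
   in the iwMMXt documentation). */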
1585 static void gen_op_iwmmxt_set_mup(void)
1586 {
1587 TCGv_i32 tmp;
1588 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1589 tcg_gen_ori_i32(tmp, tmp, 2);
1590 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1591 }
1592
1593 static void gen_op_iwmmxt_set_cup(void)
1594 {
1595 TCGv_i32 tmp;
1596 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1597 tcg_gen_ori_i32(tmp, tmp, 1);
1598 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1599 }
1600
1601 static void gen_op_iwmmxt_setpsr_nz(void)
1602 {
1603 TCGv_i32 tmp = tcg_temp_new_i32();
1604 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1605 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1606 }
1607
1608 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1609 {
1610 iwmmxt_load_reg(cpu_V1, rn);
1611 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1613 }
1614
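/* Compute the address for an iwMMXt load/store into 'dest', handling the
   pre- and post-indexed forms and base-register writeback; returns nonzero
   if the addressing mode is reserved. */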
1615 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1616 TCGv_i32 dest)
1617 {
1618 int rd;
1619 uint32_t offset;
1620 TCGv_i32 tmp;
1621
1622 rd = (insn >> 16) & 0xf;
1623 tmp = load_reg(s, rd);
1624
1625 offset = (insn & 0xff) << ((insn >> 7) & 2);
1626 if (insn & (1 << 24)) {
1627 /* Pre indexed */
1628 if (insn & (1 << 23))
1629 tcg_gen_addi_i32(tmp, tmp, offset);
1630 else
1631 tcg_gen_addi_i32(tmp, tmp, -offset);
1632 tcg_gen_mov_i32(dest, tmp);
1633 if (insn & (1 << 21))
1634 store_reg(s, rd, tmp);
1635 else
1636 tcg_temp_free_i32(tmp);
1637 } else if (insn & (1 << 21)) {
1638 /* Post indexed */
1639 tcg_gen_mov_i32(dest, tmp);
1640 if (insn & (1 << 23))
1641 tcg_gen_addi_i32(tmp, tmp, offset);
1642 else
1643 tcg_gen_addi_i32(tmp, tmp, -offset);
1644 store_reg(s, rd, tmp);
1645 } else if (!(insn & (1 << 23)))
1646 return 1;
1647 return 0;
1648 }
1649
1650 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1651 {
1652 int rd = (insn >> 0) & 0xf;
1653 TCGv_i32 tmp;
1654
1655 if (insn & (1 << 8)) {
1656 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1657 return 1;
1658 } else {
1659 tmp = iwmmxt_load_creg(rd);
1660 }
1661 } else {
1662 tmp = tcg_temp_new_i32();
1663 iwmmxt_load_reg(cpu_V0, rd);
1664 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1665 }
1666 tcg_gen_andi_i32(tmp, tmp, mask);
1667 tcg_gen_mov_i32(dest, tmp);
1668 tcg_temp_free_i32(tmp);
1669 return 0;
1670 }
1671
1672 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1673 (i.e. an undefined instruction). */
1674 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1675 {
1676 int rd, wrd;
1677 int rdhi, rdlo, rd0, rd1, i;
1678 TCGv_i32 addr;
1679 TCGv_i32 tmp, tmp2, tmp3;
1680
1681 if ((insn & 0x0e000e00) == 0x0c000000) {
1682 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1683 wrd = insn & 0xf;
1684 rdlo = (insn >> 12) & 0xf;
1685 rdhi = (insn >> 16) & 0xf;
1686 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1687 iwmmxt_load_reg(cpu_V0, wrd);
1688 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1689 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1690 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1691 } else { /* TMCRR */
1692 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1693 iwmmxt_store_reg(cpu_V0, wrd);
1694 gen_op_iwmmxt_set_mup();
1695 }
1696 return 0;
1697 }
1698
1699 wrd = (insn >> 12) & 0xf;
1700 addr = tcg_temp_new_i32();
1701 if (gen_iwmmxt_address(s, insn, addr)) {
1702 tcg_temp_free_i32(addr);
1703 return 1;
1704 }
1705 if (insn & ARM_CP_RW_BIT) {
1706 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1707 tmp = tcg_temp_new_i32();
1708 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1709 iwmmxt_store_creg(wrd, tmp);
1710 } else {
1711 i = 1;
1712 if (insn & (1 << 8)) {
1713 if (insn & (1 << 22)) { /* WLDRD */
1714 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1715 i = 0;
1716 } else { /* WLDRW wRd */
1717 tmp = tcg_temp_new_i32();
1718 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1719 }
1720 } else {
1721 tmp = tcg_temp_new_i32();
1722 if (insn & (1 << 22)) { /* WLDRH */
1723 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1724 } else { /* WLDRB */
1725 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1726 }
1727 }
1728 if (i) {
1729 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1730 tcg_temp_free_i32(tmp);
1731 }
1732 gen_op_iwmmxt_movq_wRn_M0(wrd);
1733 }
1734 } else {
1735 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1736 tmp = iwmmxt_load_creg(wrd);
1737 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1738 } else {
1739 gen_op_iwmmxt_movq_M0_wRn(wrd);
1740 tmp = tcg_temp_new_i32();
1741 if (insn & (1 << 8)) {
1742 if (insn & (1 << 22)) { /* WSTRD */
1743 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1744 } else { /* WSTRW wRd */
1745 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1746 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1747 }
1748 } else {
1749 if (insn & (1 << 22)) { /* WSTRH */
1750 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1751 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1752 } else { /* WSTRB */
1753 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1754 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1755 }
1756 }
1757 }
1758 tcg_temp_free_i32(tmp);
1759 }
1760 tcg_temp_free_i32(addr);
1761 return 0;
1762 }
1763
1764 if ((insn & 0x0f000000) != 0x0e000000)
1765 return 1;
1766
1767 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1768 case 0x000: /* WOR */
1769 wrd = (insn >> 12) & 0xf;
1770 rd0 = (insn >> 0) & 0xf;
1771 rd1 = (insn >> 16) & 0xf;
1772 gen_op_iwmmxt_movq_M0_wRn(rd0);
1773 gen_op_iwmmxt_orq_M0_wRn(rd1);
1774 gen_op_iwmmxt_setpsr_nz();
1775 gen_op_iwmmxt_movq_wRn_M0(wrd);
1776 gen_op_iwmmxt_set_mup();
1777 gen_op_iwmmxt_set_cup();
1778 break;
1779 case 0x011: /* TMCR */
1780 if (insn & 0xf)
1781 return 1;
1782 rd = (insn >> 12) & 0xf;
1783 wrd = (insn >> 16) & 0xf;
1784 switch (wrd) {
1785 case ARM_IWMMXT_wCID:
1786 case ARM_IWMMXT_wCASF:
1787 break;
1788 case ARM_IWMMXT_wCon:
1789 gen_op_iwmmxt_set_cup();
1790 /* Fall through. */
1791 case ARM_IWMMXT_wCSSF:
1792 tmp = iwmmxt_load_creg(wrd);
1793 tmp2 = load_reg(s, rd);
1794 tcg_gen_andc_i32(tmp, tmp, tmp2);
1795 tcg_temp_free_i32(tmp2);
1796 iwmmxt_store_creg(wrd, tmp);
1797 break;
1798 case ARM_IWMMXT_wCGR0:
1799 case ARM_IWMMXT_wCGR1:
1800 case ARM_IWMMXT_wCGR2:
1801 case ARM_IWMMXT_wCGR3:
1802 gen_op_iwmmxt_set_cup();
1803 tmp = load_reg(s, rd);
1804 iwmmxt_store_creg(wrd, tmp);
1805 break;
1806 default:
1807 return 1;
1808 }
1809 break;
1810 case 0x100: /* WXOR */
1811 wrd = (insn >> 12) & 0xf;
1812 rd0 = (insn >> 0) & 0xf;
1813 rd1 = (insn >> 16) & 0xf;
1814 gen_op_iwmmxt_movq_M0_wRn(rd0);
1815 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1816 gen_op_iwmmxt_setpsr_nz();
1817 gen_op_iwmmxt_movq_wRn_M0(wrd);
1818 gen_op_iwmmxt_set_mup();
1819 gen_op_iwmmxt_set_cup();
1820 break;
1821 case 0x111: /* TMRC */
1822 if (insn & 0xf)
1823 return 1;
1824 rd = (insn >> 12) & 0xf;
1825 wrd = (insn >> 16) & 0xf;
1826 tmp = iwmmxt_load_creg(wrd);
1827 store_reg(s, rd, tmp);
1828 break;
1829 case 0x300: /* WANDN */
1830 wrd = (insn >> 12) & 0xf;
1831 rd0 = (insn >> 0) & 0xf;
1832 rd1 = (insn >> 16) & 0xf;
1833 gen_op_iwmmxt_movq_M0_wRn(rd0);
1834 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1835 gen_op_iwmmxt_andq_M0_wRn(rd1);
1836 gen_op_iwmmxt_setpsr_nz();
1837 gen_op_iwmmxt_movq_wRn_M0(wrd);
1838 gen_op_iwmmxt_set_mup();
1839 gen_op_iwmmxt_set_cup();
1840 break;
1841 case 0x200: /* WAND */
1842 wrd = (insn >> 12) & 0xf;
1843 rd0 = (insn >> 0) & 0xf;
1844 rd1 = (insn >> 16) & 0xf;
1845 gen_op_iwmmxt_movq_M0_wRn(rd0);
1846 gen_op_iwmmxt_andq_M0_wRn(rd1);
1847 gen_op_iwmmxt_setpsr_nz();
1848 gen_op_iwmmxt_movq_wRn_M0(wrd);
1849 gen_op_iwmmxt_set_mup();
1850 gen_op_iwmmxt_set_cup();
1851 break;
1852 case 0x810: case 0xa10: /* WMADD */
1853 wrd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 0) & 0xf;
1855 rd1 = (insn >> 16) & 0xf;
1856 gen_op_iwmmxt_movq_M0_wRn(rd0);
1857 if (insn & (1 << 21))
1858 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1859 else
1860 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1861 gen_op_iwmmxt_movq_wRn_M0(wrd);
1862 gen_op_iwmmxt_set_mup();
1863 break;
1864 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1865 wrd = (insn >> 12) & 0xf;
1866 rd0 = (insn >> 16) & 0xf;
1867 rd1 = (insn >> 0) & 0xf;
1868 gen_op_iwmmxt_movq_M0_wRn(rd0);
1869 switch ((insn >> 22) & 3) {
1870 case 0:
1871 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1872 break;
1873 case 1:
1874 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1875 break;
1876 case 2:
1877 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1878 break;
1879 case 3:
1880 return 1;
1881 }
1882 gen_op_iwmmxt_movq_wRn_M0(wrd);
1883 gen_op_iwmmxt_set_mup();
1884 gen_op_iwmmxt_set_cup();
1885 break;
1886 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1887 wrd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 16) & 0xf;
1889 rd1 = (insn >> 0) & 0xf;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
1891 switch ((insn >> 22) & 3) {
1892 case 0:
1893 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1894 break;
1895 case 1:
1896 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1897 break;
1898 case 2:
1899 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1900 break;
1901 case 3:
1902 return 1;
1903 }
1904 gen_op_iwmmxt_movq_wRn_M0(wrd);
1905 gen_op_iwmmxt_set_mup();
1906 gen_op_iwmmxt_set_cup();
1907 break;
1908 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1909 wrd = (insn >> 12) & 0xf;
1910 rd0 = (insn >> 16) & 0xf;
1911 rd1 = (insn >> 0) & 0xf;
1912 gen_op_iwmmxt_movq_M0_wRn(rd0);
1913 if (insn & (1 << 22))
1914 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1917 if (!(insn & (1 << 20)))
1918 gen_op_iwmmxt_addl_M0_wRn(wrd);
1919 gen_op_iwmmxt_movq_wRn_M0(wrd);
1920 gen_op_iwmmxt_set_mup();
1921 break;
1922 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1923 wrd = (insn >> 12) & 0xf;
1924 rd0 = (insn >> 16) & 0xf;
1925 rd1 = (insn >> 0) & 0xf;
1926 gen_op_iwmmxt_movq_M0_wRn(rd0);
1927 if (insn & (1 << 21)) {
1928 if (insn & (1 << 20))
1929 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1930 else
1931 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1932 } else {
1933 if (insn & (1 << 20))
1934 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1935 else
1936 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1937 }
1938 gen_op_iwmmxt_movq_wRn_M0(wrd);
1939 gen_op_iwmmxt_set_mup();
1940 break;
1941 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1942 wrd = (insn >> 12) & 0xf;
1943 rd0 = (insn >> 16) & 0xf;
1944 rd1 = (insn >> 0) & 0xf;
1945 gen_op_iwmmxt_movq_M0_wRn(rd0);
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1948 else
1949 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1950 if (!(insn & (1 << 20))) {
1951 iwmmxt_load_reg(cpu_V1, wrd);
1952 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1953 }
1954 gen_op_iwmmxt_movq_wRn_M0(wrd);
1955 gen_op_iwmmxt_set_mup();
1956 break;
1957 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1958 wrd = (insn >> 12) & 0xf;
1959 rd0 = (insn >> 16) & 0xf;
1960 rd1 = (insn >> 0) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
1962 switch ((insn >> 22) & 3) {
1963 case 0:
1964 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1965 break;
1966 case 1:
1967 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1968 break;
1969 case 2:
1970 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1971 break;
1972 case 3:
1973 return 1;
1974 }
1975 gen_op_iwmmxt_movq_wRn_M0(wrd);
1976 gen_op_iwmmxt_set_mup();
1977 gen_op_iwmmxt_set_cup();
1978 break;
1979 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1980 wrd = (insn >> 12) & 0xf;
1981 rd0 = (insn >> 16) & 0xf;
1982 rd1 = (insn >> 0) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0);
1984 if (insn & (1 << 22)) {
1985 if (insn & (1 << 20))
1986 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1987 else
1988 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1989 } else {
1990 if (insn & (1 << 20))
1991 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1992 else
1993 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2000 wrd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 16) & 0xf;
2002 rd1 = (insn >> 0) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
2004 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2005 tcg_gen_andi_i32(tmp, tmp, 7);
2006 iwmmxt_load_reg(cpu_V1, rd1);
2007 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2008 tcg_temp_free_i32(tmp);
2009 gen_op_iwmmxt_movq_wRn_M0(wrd);
2010 gen_op_iwmmxt_set_mup();
2011 break;
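/* TINSR: insert the low byte, halfword or word of the ARM source register
 * into the element of wRd selected by the immediate; the mask/offset pair
 * built below is passed to the insr helper and the other elements are
 * preserved.
 */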
2012 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2013 if (((insn >> 6) & 3) == 3)
2014 return 1;
2015 rd = (insn >> 12) & 0xf;
2016 wrd = (insn >> 16) & 0xf;
2017 tmp = load_reg(s, rd);
2018 gen_op_iwmmxt_movq_M0_wRn(wrd);
2019 switch ((insn >> 6) & 3) {
2020 case 0:
2021 tmp2 = tcg_const_i32(0xff);
2022 tmp3 = tcg_const_i32((insn & 7) << 3);
2023 break;
2024 case 1:
2025 tmp2 = tcg_const_i32(0xffff);
2026 tmp3 = tcg_const_i32((insn & 3) << 4);
2027 break;
2028 case 2:
2029 tmp2 = tcg_const_i32(0xffffffff);
2030 tmp3 = tcg_const_i32((insn & 1) << 5);
2031 break;
2032 default:
2033 TCGV_UNUSED_I32(tmp2);
2034 TCGV_UNUSED_I32(tmp3);
2035 }
2036 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2037 tcg_temp_free_i32(tmp3);
2038 tcg_temp_free_i32(tmp2);
2039 tcg_temp_free_i32(tmp);
2040 gen_op_iwmmxt_movq_wRn_M0(wrd);
2041 gen_op_iwmmxt_set_mup();
2042 break;
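/* TEXTRM: extract one element of wRn into an ARM register, either
 * sign-extended or zero-extended depending on bit 3 of the insn.
 */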
2043 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2044 rd = (insn >> 12) & 0xf;
2045 wrd = (insn >> 16) & 0xf;
2046 if (rd == 15 || ((insn >> 22) & 3) == 3)
2047 return 1;
2048 gen_op_iwmmxt_movq_M0_wRn(wrd);
2049 tmp = tcg_temp_new_i32();
2050 switch ((insn >> 22) & 3) {
2051 case 0:
2052 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2053 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2054 if (insn & 8) {
2055 tcg_gen_ext8s_i32(tmp, tmp);
2056 } else {
2057 tcg_gen_andi_i32(tmp, tmp, 0xff);
2058 }
2059 break;
2060 case 1:
2061 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2062 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2063 if (insn & 8) {
2064 tcg_gen_ext16s_i32(tmp, tmp);
2065 } else {
2066 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2067 }
2068 break;
2069 case 2:
2070 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2071 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2072 break;
2073 }
2074 store_reg(s, rd, tmp);
2075 break;
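/* TEXTRC: copy the SIMD condition flags of one selected element from
 * wCASF into the ARM NZCV flags; the shifts pick out the lane's flag
 * nibble and move it up to bits [31:28] for gen_set_nzcv().
 */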
2076 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2077 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2078 return 1;
2079 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2080 switch ((insn >> 22) & 3) {
2081 case 0:
2082 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2083 break;
2084 case 1:
2085 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2086 break;
2087 case 2:
2088 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2089 break;
2090 }
2091 tcg_gen_shli_i32(tmp, tmp, 28);
2092 gen_set_nzcv(tmp);
2093 tcg_temp_free_i32(tmp);
2094 break;
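/* TBCST: broadcast the ARM source register into every byte, halfword or
 * word element of wRd.
 */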
2095 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2096 if (((insn >> 6) & 3) == 3)
2097 return 1;
2098 rd = (insn >> 12) & 0xf;
2099 wrd = (insn >> 16) & 0xf;
2100 tmp = load_reg(s, rd);
2101 switch ((insn >> 6) & 3) {
2102 case 0:
2103 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2104 break;
2105 case 1:
2106 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2107 break;
2108 case 2:
2109 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2110 break;
2111 }
2112 tcg_temp_free_i32(tmp);
2113 gen_op_iwmmxt_movq_wRn_M0(wrd);
2114 gen_op_iwmmxt_set_mup();
2115 break;
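/* TANDC: AND the per-lane flag fields held in wCASF together and set the
 * ARM NZCV flags from the result; the shifted copies fold one lane into
 * the running value at a time, leaving the combined flags in bits [31:28].
 */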
2116 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2117 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2118 return 1;
2119 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2120 tmp2 = tcg_temp_new_i32();
2121 tcg_gen_mov_i32(tmp2, tmp);
2122 switch ((insn >> 22) & 3) {
2123 case 0:
2124 for (i = 0; i < 7; i++) {
2125 tcg_gen_shli_i32(tmp2, tmp2, 4);
2126 tcg_gen_and_i32(tmp, tmp, tmp2);
2127 }
2128 break;
2129 case 1:
2130 for (i = 0; i < 3; i++) {
2131 tcg_gen_shli_i32(tmp2, tmp2, 8);
2132 tcg_gen_and_i32(tmp, tmp, tmp2);
2133 }
2134 break;
2135 case 2:
2136 tcg_gen_shli_i32(tmp2, tmp2, 16);
2137 tcg_gen_and_i32(tmp, tmp, tmp2);
2138 break;
2139 }
2140 gen_set_nzcv(tmp);
2141 tcg_temp_free_i32(tmp2);
2142 tcg_temp_free_i32(tmp);
2143 break;
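/* WACC: unsigned accumulate across all elements of wRn; the addc helpers
 * sum every byte/halfword/word lane into a single value written to wRd.
 */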
2144 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 switch ((insn >> 22) & 3) {
2149 case 0:
2150 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2151 break;
2152 case 1:
2153 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2154 break;
2155 case 2:
2156 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2157 break;
2158 case 3:
2159 return 1;
2160 }
2161 gen_op_iwmmxt_movq_wRn_M0(wrd);
2162 gen_op_iwmmxt_set_mup();
2163 break;
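/* TORC: as TANDC above, but ORs the per-lane flag fields from wCASF
 * together before setting NZCV.
 */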
2164 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2165 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2166 return 1;
2167 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2168 tmp2 = tcg_temp_new_i32();
2169 tcg_gen_mov_i32(tmp2, tmp);
2170 switch ((insn >> 22) & 3) {
2171 case 0:
2172 for (i = 0; i < 7; i++) {
2173 tcg_gen_shli_i32(tmp2, tmp2, 4);
2174 tcg_gen_or_i32(tmp, tmp, tmp2);
2175 }
2176 break;
2177 case 1:
2178 for (i = 0; i < 3; i++) {
2179 tcg_gen_shli_i32(tmp2, tmp2, 8);
2180 tcg_gen_or_i32(tmp, tmp, tmp2);
2181 }
2182 break;
2183 case 2:
2184 tcg_gen_shli_i32(tmp2, tmp2, 16);
2185 tcg_gen_or_i32(tmp, tmp, tmp2);
2186 break;
2187 }
2188 gen_set_nzcv(tmp);
2189 tcg_temp_free_i32(tmp2);
2190 tcg_temp_free_i32(tmp);
2191 break;
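/* TMOVMSK: gather the most significant bit of each element of wRn into
 * the low bits of the ARM destination register.
 */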
2192 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2193 rd = (insn >> 12) & 0xf;
2194 rd0 = (insn >> 16) & 0xf;
2195 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2196 return 1;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 tmp = tcg_temp_new_i32();
2199 switch ((insn >> 22) & 3) {
2200 case 0:
2201 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2202 break;
2203 case 1:
2204 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2205 break;
2206 case 2:
2207 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2208 break;
2209 }
2210 store_reg(s, rd, tmp);
2211 break;
2212 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2213 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2214 wrd = (insn >> 12) & 0xf;
2215 rd0 = (insn >> 16) & 0xf;
2216 rd1 = (insn >> 0) & 0xf;
2217 gen_op_iwmmxt_movq_M0_wRn(rd0);
2218 switch ((insn >> 22) & 3) {
2219 case 0:
2220 if (insn & (1 << 21))
2221 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2222 else
2223 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2224 break;
2225 case 1:
2226 if (insn & (1 << 21))
2227 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2228 else
2229 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2230 break;
2231 case 2:
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2236 break;
2237 case 3:
2238 return 1;
2239 }
2240 gen_op_iwmmxt_movq_wRn_M0(wrd);
2241 gen_op_iwmmxt_set_mup();
2242 gen_op_iwmmxt_set_cup();
2243 break;
2244 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2245 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2246 wrd = (insn >> 12) & 0xf;
2247 rd0 = (insn >> 16) & 0xf;
2248 gen_op_iwmmxt_movq_M0_wRn(rd0);
2249 switch ((insn >> 22) & 3) {
2250 case 0:
2251 if (insn & (1 << 21))
2252 gen_op_iwmmxt_unpacklsb_M0();
2253 else
2254 gen_op_iwmmxt_unpacklub_M0();
2255 break;
2256 case 1:
2257 if (insn & (1 << 21))
2258 gen_op_iwmmxt_unpacklsw_M0();
2259 else
2260 gen_op_iwmmxt_unpackluw_M0();
2261 break;
2262 case 2:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_unpacklsl_M0();
2265 else
2266 gen_op_iwmmxt_unpacklul_M0();
2267 break;
2268 case 3:
2269 return 1;
2270 }
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 gen_op_iwmmxt_set_cup();
2274 break;
2275 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2276 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2277 wrd = (insn >> 12) & 0xf;
2278 rd0 = (insn >> 16) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 switch ((insn >> 22) & 3) {
2281 case 0:
2282 if (insn & (1 << 21))
2283 gen_op_iwmmxt_unpackhsb_M0();
2284 else
2285 gen_op_iwmmxt_unpackhub_M0();
2286 break;
2287 case 1:
2288 if (insn & (1 << 21))
2289 gen_op_iwmmxt_unpackhsw_M0();
2290 else
2291 gen_op_iwmmxt_unpackhuw_M0();
2292 break;
2293 case 2:
2294 if (insn & (1 << 21))
2295 gen_op_iwmmxt_unpackhsl_M0();
2296 else
2297 gen_op_iwmmxt_unpackhul_M0();
2298 break;
2299 case 3:
2300 return 1;
2301 }
2302 gen_op_iwmmxt_movq_wRn_M0(wrd);
2303 gen_op_iwmmxt_set_mup();
2304 gen_op_iwmmxt_set_cup();
2305 break;
2306 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2307 case 0x214: case 0x614: case 0xa14: case 0xe14:
2308 if (((insn >> 22) & 3) == 0)
2309 return 1;
2310 wrd = (insn >> 12) & 0xf;
2311 rd0 = (insn >> 16) & 0xf;
2312 gen_op_iwmmxt_movq_M0_wRn(rd0);
2313 tmp = tcg_temp_new_i32();
2314 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2315 tcg_temp_free_i32(tmp);
2316 return 1;
2317 }
2318 switch ((insn >> 22) & 3) {
2319 case 1:
2320 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2321 break;
2322 case 2:
2323 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2324 break;
2325 case 3:
2326 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2327 break;
2328 }
2329 tcg_temp_free_i32(tmp);
2330 gen_op_iwmmxt_movq_wRn_M0(wrd);
2331 gen_op_iwmmxt_set_mup();
2332 gen_op_iwmmxt_set_cup();
2333 break;
2334 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2335 case 0x014: case 0x414: case 0x814: case 0xc14:
2336 if (((insn >> 22) & 3) == 0)
2337 return 1;
2338 wrd = (insn >> 12) & 0xf;
2339 rd0 = (insn >> 16) & 0xf;
2340 gen_op_iwmmxt_movq_M0_wRn(rd0);
2341 tmp = tcg_temp_new_i32();
2342 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2343 tcg_temp_free_i32(tmp);
2344 return 1;
2345 }
2346 switch ((insn >> 22) & 3) {
2347 case 1:
2348 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2349 break;
2350 case 2:
2351 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2352 break;
2353 case 3:
2354 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2355 break;
2356 }
2357 tcg_temp_free_i32(tmp);
2358 gen_op_iwmmxt_movq_wRn_M0(wrd);
2359 gen_op_iwmmxt_set_mup();
2360 gen_op_iwmmxt_set_cup();
2361 break;
2362 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2363 case 0x114: case 0x514: case 0x914: case 0xd14:
2364 if (((insn >> 22) & 3) == 0)
2365 return 1;
2366 wrd = (insn >> 12) & 0xf;
2367 rd0 = (insn >> 16) & 0xf;
2368 gen_op_iwmmxt_movq_M0_wRn(rd0);
2369 tmp = tcg_temp_new_i32();
2370 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2371 tcg_temp_free_i32(tmp);
2372 return 1;
2373 }
2374 switch ((insn >> 22) & 3) {
2375 case 1:
2376 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2377 break;
2378 case 2:
2379 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2380 break;
2381 case 3:
2382 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2383 break;
2384 }
2385 tcg_temp_free_i32(tmp);
2386 gen_op_iwmmxt_movq_wRn_M0(wrd);
2387 gen_op_iwmmxt_set_mup();
2388 gen_op_iwmmxt_set_cup();
2389 break;
2390 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2391 case 0x314: case 0x714: case 0xb14: case 0xf14:
2392 if (((insn >> 22) & 3) == 0)
2393 return 1;
2394 wrd = (insn >> 12) & 0xf;
2395 rd0 = (insn >> 16) & 0xf;
2396 gen_op_iwmmxt_movq_M0_wRn(rd0);
2397 tmp = tcg_temp_new_i32();
2398 switch ((insn >> 22) & 3) {
2399 case 1:
2400 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2401 tcg_temp_free_i32(tmp);
2402 return 1;
2403 }
2404 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2405 break;
2406 case 2:
2407 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2408 tcg_temp_free_i32(tmp);
2409 return 1;
2410 }
2411 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2412 break;
2413 case 3:
2414 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2415 tcg_temp_free_i32(tmp);
2416 return 1;
2417 }
2418 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2419 break;
2420 }
2421 tcg_temp_free_i32(tmp);
2422 gen_op_iwmmxt_movq_wRn_M0(wrd);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2425 break;
2426 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2427 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2428 wrd = (insn >> 12) & 0xf;
2429 rd0 = (insn >> 16) & 0xf;
2430 rd1 = (insn >> 0) & 0xf;
2431 gen_op_iwmmxt_movq_M0_wRn(rd0);
2432 switch ((insn >> 22) & 3) {
2433 case 0:
2434 if (insn & (1 << 21))
2435 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2436 else
2437 gen_op_iwmmxt_minub_M0_wRn(rd1);
2438 break;
2439 case 1:
2440 if (insn & (1 << 21))
2441 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2442 else
2443 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2444 break;
2445 case 2:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2448 else
2449 gen_op_iwmmxt_minul_M0_wRn(rd1);
2450 break;
2451 case 3:
2452 return 1;
2453 }
2454 gen_op_iwmmxt_movq_wRn_M0(wrd);
2455 gen_op_iwmmxt_set_mup();
2456 break;
2457 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2458 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2459 wrd = (insn >> 12) & 0xf;
2460 rd0 = (insn >> 16) & 0xf;
2461 rd1 = (insn >> 0) & 0xf;
2462 gen_op_iwmmxt_movq_M0_wRn(rd0);
2463 switch ((insn >> 22) & 3) {
2464 case 0:
2465 if (insn & (1 << 21))
2466 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2467 else
2468 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2469 break;
2470 case 1:
2471 if (insn & (1 << 21))
2472 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2473 else
2474 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2475 break;
2476 case 2:
2477 if (insn & (1 << 21))
2478 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2479 else
2480 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2481 break;
2482 case 3:
2483 return 1;
2484 }
2485 gen_op_iwmmxt_movq_wRn_M0(wrd);
2486 gen_op_iwmmxt_set_mup();
2487 break;
2488 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2489 case 0x402: case 0x502: case 0x602: case 0x702:
2490 wrd = (insn >> 12) & 0xf;
2491 rd0 = (insn >> 16) & 0xf;
2492 rd1 = (insn >> 0) & 0xf;
2493 gen_op_iwmmxt_movq_M0_wRn(rd0);
2494 tmp = tcg_const_i32((insn >> 20) & 3);
2495 iwmmxt_load_reg(cpu_V1, rd1);
2496 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2497 tcg_temp_free_i32(tmp);
2498 gen_op_iwmmxt_movq_wRn_M0(wrd);
2499 gen_op_iwmmxt_set_mup();
2500 break;
2501 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2502 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2503 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2504 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 rd1 = (insn >> 0) & 0xf;
2508 gen_op_iwmmxt_movq_M0_wRn(rd0);
2509 switch ((insn >> 20) & 0xf) {
2510 case 0x0:
2511 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2512 break;
2513 case 0x1:
2514 gen_op_iwmmxt_subub_M0_wRn(rd1);
2515 break;
2516 case 0x3:
2517 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2518 break;
2519 case 0x4:
2520 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2521 break;
2522 case 0x5:
2523 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2524 break;
2525 case 0x7:
2526 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2527 break;
2528 case 0x8:
2529 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2530 break;
2531 case 0x9:
2532 gen_op_iwmmxt_subul_M0_wRn(rd1);
2533 break;
2534 case 0xb:
2535 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2536 break;
2537 default:
2538 return 1;
2539 }
2540 gen_op_iwmmxt_movq_wRn_M0(wrd);
2541 gen_op_iwmmxt_set_mup();
2542 gen_op_iwmmxt_set_cup();
2543 break;
2544 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2545 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2546 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2547 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2548 wrd = (insn >> 12) & 0xf;
2549 rd0 = (insn >> 16) & 0xf;
2550 gen_op_iwmmxt_movq_M0_wRn(rd0);
2551 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2552 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2553 tcg_temp_free_i32(tmp);
2554 gen_op_iwmmxt_movq_wRn_M0(wrd);
2555 gen_op_iwmmxt_set_mup();
2556 gen_op_iwmmxt_set_cup();
2557 break;
2558 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2559 case 0x418: case 0x518: case 0x618: case 0x718:
2560 case 0x818: case 0x918: case 0xa18: case 0xb18:
2561 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2562 wrd = (insn >> 12) & 0xf;
2563 rd0 = (insn >> 16) & 0xf;
2564 rd1 = (insn >> 0) & 0xf;
2565 gen_op_iwmmxt_movq_M0_wRn(rd0);
2566 switch ((insn >> 20) & 0xf) {
2567 case 0x0:
2568 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2569 break;
2570 case 0x1:
2571 gen_op_iwmmxt_addub_M0_wRn(rd1);
2572 break;
2573 case 0x3:
2574 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2575 break;
2576 case 0x4:
2577 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2578 break;
2579 case 0x5:
2580 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2581 break;
2582 case 0x7:
2583 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2584 break;
2585 case 0x8:
2586 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2587 break;
2588 case 0x9:
2589 gen_op_iwmmxt_addul_M0_wRn(rd1);
2590 break;
2591 case 0xb:
2592 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2593 break;
2594 default:
2595 return 1;
2596 }
2597 gen_op_iwmmxt_movq_wRn_M0(wrd);
2598 gen_op_iwmmxt_set_mup();
2599 gen_op_iwmmxt_set_cup();
2600 break;
2601 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2602 case 0x408: case 0x508: case 0x608: case 0x708:
2603 case 0x808: case 0x908: case 0xa08: case 0xb08:
2604 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2605 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2606 return 1;
2607 wrd = (insn >> 12) & 0xf;
2608 rd0 = (insn >> 16) & 0xf;
2609 rd1 = (insn >> 0) & 0xf;
2610 gen_op_iwmmxt_movq_M0_wRn(rd0);
2611 switch ((insn >> 22) & 3) {
2612 case 1:
2613 if (insn & (1 << 21))
2614 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2615 else
2616 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2617 break;
2618 case 2:
2619 if (insn & (1 << 21))
2620 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2621 else
2622 gen_op_iwmmxt_packul_M0_wRn(rd1);
2623 break;
2624 case 3:
2625 if (insn & (1 << 21))
2626 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2627 else
2628 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2629 break;
2630 }
2631 gen_op_iwmmxt_movq_wRn_M0(wrd);
2632 gen_op_iwmmxt_set_mup();
2633 gen_op_iwmmxt_set_cup();
2634 break;
2635 case 0x201: case 0x203: case 0x205: case 0x207:
2636 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2637 case 0x211: case 0x213: case 0x215: case 0x217:
2638 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2639 wrd = (insn >> 5) & 0xf;
2640 rd0 = (insn >> 12) & 0xf;
2641 rd1 = (insn >> 0) & 0xf;
2642 if (rd0 == 0xf || rd1 == 0xf)
2643 return 1;
2644 gen_op_iwmmxt_movq_M0_wRn(wrd);
2645 tmp = load_reg(s, rd0);
2646 tmp2 = load_reg(s, rd1);
2647 switch ((insn >> 16) & 0xf) {
2648 case 0x0: /* TMIA */
2649 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2650 break;
2651 case 0x8: /* TMIAPH */
2652 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2653 break;
2654 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2655 if (insn & (1 << 16))
2656 tcg_gen_shri_i32(tmp, tmp, 16);
2657 if (insn & (1 << 17))
2658 tcg_gen_shri_i32(tmp2, tmp2, 16);
2659 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2660 break;
2661 default:
2662 tcg_temp_free_i32(tmp2);
2663 tcg_temp_free_i32(tmp);
2664 return 1;
2665 }
2666 tcg_temp_free_i32(tmp2);
2667 tcg_temp_free_i32(tmp);
2668 gen_op_iwmmxt_movq_wRn_M0(wrd);
2669 gen_op_iwmmxt_set_mup();
2670 break;
2671 default:
2672 return 1;
2673 }
2674
2675 return 0;
2676 }
2677
2678 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2679 (i.e. an undefined instruction). */
2680 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2681 {
2682 int acc, rd0, rd1, rdhi, rdlo;
2683 TCGv_i32 tmp, tmp2;
2684
2685 if ((insn & 0x0ff00f10) == 0x0e200010) {
2686 /* Multiply with Internal Accumulate Format */
2687 rd0 = (insn >> 12) & 0xf;
2688 rd1 = insn & 0xf;
2689 acc = (insn >> 5) & 7;
2690
2691 if (acc != 0)
2692 return 1;
2693
2694 tmp = load_reg(s, rd0);
2695 tmp2 = load_reg(s, rd1);
2696 switch ((insn >> 16) & 0xf) {
2697 case 0x0: /* MIA */
2698 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2699 break;
2700 case 0x8: /* MIAPH */
2701 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2702 break;
2703 case 0xc: /* MIABB */
2704 case 0xd: /* MIABT */
2705 case 0xe: /* MIATB */
2706 case 0xf: /* MIATT */
2707 if (insn & (1 << 16))
2708 tcg_gen_shri_i32(tmp, tmp, 16);
2709 if (insn & (1 << 17))
2710 tcg_gen_shri_i32(tmp2, tmp2, 16);
2711 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2712 break;
2713 default:
2714 return 1;
2715 }
2716 tcg_temp_free_i32(tmp2);
2717 tcg_temp_free_i32(tmp);
2718
2719 gen_op_iwmmxt_movq_wRn_M0(acc);
2720 return 0;
2721 }
2722
2723 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2724 /* Internal Accumulator Access Format */
2725 rdhi = (insn >> 16) & 0xf;
2726 rdlo = (insn >> 12) & 0xf;
2727 acc = insn & 7;
2728
2729 if (acc != 0)
2730 return 1;
2731
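/* The internal accumulator acc0 is 40 bits wide: MRA returns the low word
 * in RdLo and masks RdHi down to bits [39:32], while MAR concatenates
 * RdHi:RdLo and stores the 64-bit result back into the accumulator.
 */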
2732 if (insn & ARM_CP_RW_BIT) { /* MRA */
2733 iwmmxt_load_reg(cpu_V0, acc);
2734 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2735 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2736 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2737 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2738 } else { /* MAR */
2739 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2740 iwmmxt_store_reg(cpu_V0, acc);
2741 }
2742 return 0;
2743 }
2744
2745 return 1;
2746 }
2747
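/* Helpers for decoding VFP register numbers: a register number is a 4-bit
 * field plus one extra bit elsewhere in the insn.  For single-precision
 * registers the extra bit is the LSB; for double-precision registers it is
 * the top bit and is only valid when VFP3 (32 D registers) is implemented,
 * otherwise the insn UNDEFs (return 1).
 */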
2748 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2749 #define VFP_SREG(insn, bigbit, smallbit) \
2750 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2751 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2752 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2753 reg = (((insn) >> (bigbit)) & 0x0f) \
2754 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2755 } else { \
2756 if (insn & (1 << (smallbit))) \
2757 return 1; \
2758 reg = ((insn) >> (bigbit)) & 0x0f; \
2759 }} while (0)
2760
2761 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2762 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2763 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2764 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2765 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2766 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2767
2768 /* Move between integer and VFP cores. */
2769 static TCGv_i32 gen_vfp_mrs(void)
2770 {
2771 TCGv_i32 tmp = tcg_temp_new_i32();
2772 tcg_gen_mov_i32(tmp, cpu_F0s);
2773 return tmp;
2774 }
2775
2776 static void gen_vfp_msr(TCGv_i32 tmp)
2777 {
2778 tcg_gen_mov_i32(cpu_F0s, tmp);
2779 tcg_temp_free_i32(tmp);
2780 }
2781
2782 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2783 {
2784 TCGv_i32 tmp = tcg_temp_new_i32();
2785 if (shift)
2786 tcg_gen_shri_i32(var, var, shift);
2787 tcg_gen_ext8u_i32(var, var);
2788 tcg_gen_shli_i32(tmp, var, 8);
2789 tcg_gen_or_i32(var, var, tmp);
2790 tcg_gen_shli_i32(tmp, var, 16);
2791 tcg_gen_or_i32(var, var, tmp);
2792 tcg_temp_free_i32(tmp);
2793 }
2794
2795 static void gen_neon_dup_low16(TCGv_i32 var)
2796 {
2797 TCGv_i32 tmp = tcg_temp_new_i32();
2798 tcg_gen_ext16u_i32(var, var);
2799 tcg_gen_shli_i32(tmp, var, 16);
2800 tcg_gen_or_i32(var, var, tmp);
2801 tcg_temp_free_i32(tmp);
2802 }
2803
2804 static void gen_neon_dup_high16(TCGv_i32 var)
2805 {
2806 TCGv_i32 tmp = tcg_temp_new_i32();
2807 tcg_gen_andi_i32(var, var, 0xffff0000);
2808 tcg_gen_shri_i32(tmp, var, 16);
2809 tcg_gen_or_i32(var, var, tmp);
2810 tcg_temp_free_i32(tmp);
2811 }
2812
2813 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2814 {
2815 /* Load a single Neon element and replicate it into a 32-bit TCG reg */
2816 TCGv_i32 tmp = tcg_temp_new_i32();
2817 switch (size) {
2818 case 0:
2819 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2820 gen_neon_dup_u8(tmp, 0);
2821 break;
2822 case 1:
2823 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2824 gen_neon_dup_low16(tmp);
2825 break;
2826 case 2:
2827 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2828 break;
2829 default: /* Avoid compiler warnings. */
2830 abort();
2831 }
2832 return tmp;
2833 }
2834
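/* VSEL: dest = frn if the condition (EQ, VS, GE or GT, from bits [21:20])
 * holds, else frm.  Implemented with movcond on the cached NZCV flag
 * variables rather than with branches.
 */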
2835 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2836 uint32_t dp)
2837 {
2838 uint32_t cc = extract32(insn, 20, 2);
2839
2840 if (dp) {
2841 TCGv_i64 frn, frm, dest;
2842 TCGv_i64 tmp, zero, zf, nf, vf;
2843
2844 zero = tcg_const_i64(0);
2845
2846 frn = tcg_temp_new_i64();
2847 frm = tcg_temp_new_i64();
2848 dest = tcg_temp_new_i64();
2849
2850 zf = tcg_temp_new_i64();
2851 nf = tcg_temp_new_i64();
2852 vf = tcg_temp_new_i64();
2853
2854 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2855 tcg_gen_ext_i32_i64(nf, cpu_NF);
2856 tcg_gen_ext_i32_i64(vf, cpu_VF);
2857
2858 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2859 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2860 switch (cc) {
2861 case 0: /* eq: Z */
2862 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2863 frn, frm);
2864 break;
2865 case 1: /* vs: V */
2866 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2867 frn, frm);
2868 break;
2869 case 2: /* ge: N == V -> N ^ V == 0 */
2870 tmp = tcg_temp_new_i64();
2871 tcg_gen_xor_i64(tmp, vf, nf);
2872 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2873 frn, frm);
2874 tcg_temp_free_i64(tmp);
2875 break;
2876 case 3: /* gt: !Z && N == V */
2877 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2878 frn, frm);
2879 tmp = tcg_temp_new_i64();
2880 tcg_gen_xor_i64(tmp, vf, nf);
2881 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2882 dest, frm);
2883 tcg_temp_free_i64(tmp);
2884 break;
2885 }
2886 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2887 tcg_temp_free_i64(frn);
2888 tcg_temp_free_i64(frm);
2889 tcg_temp_free_i64(dest);
2890
2891 tcg_temp_free_i64(zf);
2892 tcg_temp_free_i64(nf);
2893 tcg_temp_free_i64(vf);
2894
2895 tcg_temp_free_i64(zero);
2896 } else {
2897 TCGv_i32 frn, frm, dest;
2898 TCGv_i32 tmp, zero;
2899
2900 zero = tcg_const_i32(0);
2901
2902 frn = tcg_temp_new_i32();
2903 frm = tcg_temp_new_i32();
2904 dest = tcg_temp_new_i32();
2905 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2906 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2907 switch (cc) {
2908 case 0: /* eq: Z */
2909 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2910 frn, frm);
2911 break;
2912 case 1: /* vs: V */
2913 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2914 frn, frm);
2915 break;
2916 case 2: /* ge: N == V -> N ^ V == 0 */
2917 tmp = tcg_temp_new_i32();
2918 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2919 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2920 frn, frm);
2921 tcg_temp_free_i32(tmp);
2922 break;
2923 case 3: /* gt: !Z && N == V */
2924 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2925 frn, frm);
2926 tmp = tcg_temp_new_i32();
2927 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2928 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2929 dest, frm);
2930 tcg_temp_free_i32(tmp);
2931 break;
2932 }
2933 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2934 tcg_temp_free_i32(frn);
2935 tcg_temp_free_i32(frm);
2936 tcg_temp_free_i32(dest);
2937
2938 tcg_temp_free_i32(zero);
2939 }
2940
2941 return 0;
2942 }
2943
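/* VMAXNM/VMINNM: ARMv8 floating-point maximum/minimum following the IEEE
 * 754-2008 maxNum/minNum rules (a single quiet-NaN operand loses to the
 * numeric one).  Bit 6 of the insn selects min rather than max.
 */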
2944 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2945 uint32_t rm, uint32_t dp)
2946 {
2947 uint32_t vmin = extract32(insn, 6, 1);
2948 TCGv_ptr fpst = get_fpstatus_ptr(0);
2949
2950 if (dp) {
2951 TCGv_i64 frn, frm, dest;
2952
2953 frn = tcg_temp_new_i64();
2954 frm = tcg_temp_new_i64();
2955 dest = tcg_temp_new_i64();
2956
2957 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2958 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2959 if (vmin) {
2960 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2961 } else {
2962 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2963 }
2964 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2965 tcg_temp_free_i64(frn);
2966 tcg_temp_free_i64(frm);
2967 tcg_temp_free_i64(dest);
2968 } else {
2969 TCGv_i32 frn, frm, dest;
2970
2971 frn = tcg_temp_new_i32();
2972 frm = tcg_temp_new_i32();
2973 dest = tcg_temp_new_i32();
2974
2975 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2976 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2977 if (vmin) {
2978 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2979 } else {
2980 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2981 }
2982 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2983 tcg_temp_free_i32(frn);
2984 tcg_temp_free_i32(frm);
2985 tcg_temp_free_i32(dest);
2986 }
2987
2988 tcg_temp_free_ptr(fpst);
2989 return 0;
2990 }
2991
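/* VRINT{A,N,P,M}: round to integral in floating-point with an explicitly
 * encoded rounding mode.  The mode is installed before the rint helper
 * runs; the second set_rmode call swaps the saved previous mode back in.
 */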
2992 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2993 int rounding)
2994 {
2995 TCGv_ptr fpst = get_fpstatus_ptr(0);
2996 TCGv_i32 tcg_rmode;
2997
2998 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2999 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3000
3001 if (dp) {
3002 TCGv_i64 tcg_op;
3003 TCGv_i64 tcg_res;
3004 tcg_op = tcg_temp_new_i64();
3005 tcg_res = tcg_temp_new_i64();
3006 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3007 gen_helper_rintd(tcg_res, tcg_op, fpst);
3008 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3009 tcg_temp_free_i64(tcg_op);
3010 tcg_temp_free_i64(tcg_res);
3011 } else {
3012 TCGv_i32 tcg_op;
3013 TCGv_i32 tcg_res;
3014 tcg_op = tcg_temp_new_i32();
3015 tcg_res = tcg_temp_new_i32();
3016 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3017 gen_helper_rints(tcg_res, tcg_op, fpst);
3018 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3019 tcg_temp_free_i32(tcg_op);
3020 tcg_temp_free_i32(tcg_res);
3021 }
3022
3023 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3024 tcg_temp_free_i32(tcg_rmode);
3025
3026 tcg_temp_free_ptr(fpst);
3027 return 0;
3028 }
3029
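/* VCVT{A,N,P,M}: float to 32-bit integer with an explicitly encoded
 * rounding mode; bit 7 selects a signed result.  The integer destination
 * is always a single-precision register, even for a double source.
 */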
3030 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3031 int rounding)
3032 {
3033 bool is_signed = extract32(insn, 7, 1);
3034 TCGv_ptr fpst = get_fpstatus_ptr(0);
3035 TCGv_i32 tcg_rmode, tcg_shift;
3036
3037 tcg_shift = tcg_const_i32(0);
3038
3039 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3040 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3041
3042 if (dp) {
3043 TCGv_i64 tcg_double, tcg_res;
3044 TCGv_i32 tcg_tmp;
3045 /* Rd is encoded as a single precision register even when the source
3046 * is double precision.
3047 */
3048 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3049 tcg_double = tcg_temp_new_i64();
3050 tcg_res = tcg_temp_new_i64();
3051 tcg_tmp = tcg_temp_new_i32();
3052 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3053 if (is_signed) {
3054 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3055 } else {
3056 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3057 }
3058 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3059 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3060 tcg_temp_free_i32(tcg_tmp);
3061 tcg_temp_free_i64(tcg_res);
3062 tcg_temp_free_i64(tcg_double);
3063 } else {
3064 TCGv_i32 tcg_single, tcg_res;
3065 tcg_single = tcg_temp_new_i32();
3066 tcg_res = tcg_temp_new_i32();
3067 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3068 if (is_signed) {
3069 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3070 } else {
3071 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3072 }
3073 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3074 tcg_temp_free_i32(tcg_res);
3075 tcg_temp_free_i32(tcg_single);
3076 }
3077
3078 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3079 tcg_temp_free_i32(tcg_rmode);
3080
3081 tcg_temp_free_i32(tcg_shift);
3082
3083 tcg_temp_free_ptr(fpst);
3084
3085 return 0;
3086 }
3087
3088 /* Table for converting the most common AArch32 encoding of
3089 * rounding mode to arm_fprounding order (which matches the
3090 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3091 */
3092 static const uint8_t fp_decode_rm[] = {
3093 FPROUNDING_TIEAWAY,
3094 FPROUNDING_TIEEVEN,
3095 FPROUNDING_POSINF,
3096 FPROUNDING_NEGINF,
3097 };
3098
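/* Decode the VFP encodings added in ARMv8 (VSEL, VMAXNM/VMINNM, VRINT*
 * and VCVT* with rounding mode) and dispatch to the handlers above.
 * Returns nonzero for anything that should UNDEF.
 */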
3099 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3100 {
3101 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3102
3103 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3104 return 1;
3105 }
3106
3107 if (dp) {
3108 VFP_DREG_D(rd, insn);
3109 VFP_DREG_N(rn, insn);
3110 VFP_DREG_M(rm, insn);
3111 } else {
3112 rd = VFP_SREG_D(insn);
3113 rn = VFP_SREG_N(insn);
3114 rm = VFP_SREG_M(insn);
3115 }
3116
3117 if ((insn & 0x0f800e50) == 0x0e000a00) {
3118 return handle_vsel(insn, rd, rn, rm, dp);
3119 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3120 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3121 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3122 /* VRINTA, VRINTN, VRINTP, VRINTM */
3123 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3124 return handle_vrint(insn, rd, rm, dp, rounding);
3125 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3126 /* VCVTA, VCVTN, VCVTP, VCVTM */
3127 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3128 return handle_vcvt(insn, rd, rm, dp, rounding);
3129 }
3130 return 1;
3131 }
3132
3133 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3134 (i.e. an undefined instruction). */
3135 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3136 {
3137 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3138 int dp, veclen;
3139 TCGv_i32 addr;
3140 TCGv_i32 tmp;
3141 TCGv_i32 tmp2;
3142
3143 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3144 return 1;
3145 }
3146
3147 /* FIXME: this access check should not take precedence over UNDEF
3148 * for invalid encodings; we will generate incorrect syndrome information
3149 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3150 */
3151 if (s->fp_excp_el) {
3152 gen_exception_insn(s, 4, EXCP_UDEF,
3153 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3154 return 0;
3155 }
3156
3157 if (!s->vfp_enabled) {
3158 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3159 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3160 return 1;
3161 rn = (insn >> 16) & 0xf;
3162 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3163 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3164 return 1;
3165 }
3166 }
3167
3168 if (extract32(insn, 28, 4) == 0xf) {
3169 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3170 * only used in v8 and above.
3171 */