[qemu.git] / target / arm / translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
31 #include "exec/semihost.h"
32
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
35
36 #include "trace-tcg.h"
37 #include "exec/log.h"
38
39
40 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
42 /* currently all emulated v5 cores are also v5TE, so don't bother */
43 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
44 #define ENABLE_ARCH_5J 0
45 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
50
51 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
52
53 #include "translate.h"
54
55 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) 1
57 #else
58 #define IS_USER(s) (s->user)
59 #endif
60
61 TCGv_env cpu_env;
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
68
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
72
73 #include "exec/gen-icount.h"
74
75 static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
79 /* initialize TCG globals. */
80 void arm_translate_init(void)
81 {
82 int i;
83
84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
85 tcg_ctx.tcg_env = cpu_env;
86
87 for (i = 0; i < 16; i++) {
88 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
89 offsetof(CPUARMState, regs[i]),
90 regnames[i]);
91 }
92 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
93 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
94 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
95 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96
97 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
98 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
99 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
100 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101
102 a64_translate_init();
103 }
104
105 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
106 {
107 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
108 * insns:
109 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
110 * otherwise, access as if at PL0.
111 */
112 switch (s->mmu_idx) {
113 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
114 case ARMMMUIdx_S12NSE0:
115 case ARMMMUIdx_S12NSE1:
116 return ARMMMUIdx_S12NSE0;
117 case ARMMMUIdx_S1E3:
118 case ARMMMUIdx_S1SE0:
119 case ARMMMUIdx_S1SE1:
120 return ARMMMUIdx_S1SE0;
121 case ARMMMUIdx_S2NS:
122 default:
123 g_assert_not_reached();
124 }
125 }
126
127 static inline TCGv_i32 load_cpu_offset(int offset)
128 {
129 TCGv_i32 tmp = tcg_temp_new_i32();
130 tcg_gen_ld_i32(tmp, cpu_env, offset);
131 return tmp;
132 }
133
134 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
135
136 static inline void store_cpu_offset(TCGv_i32 var, int offset)
137 {
138 tcg_gen_st_i32(var, cpu_env, offset);
139 tcg_temp_free_i32(var);
140 }
141
142 #define store_cpu_field(var, name) \
143 store_cpu_offset(var, offsetof(CPUARMState, name))
144
145 /* Set a variable to the value of a CPU register. */
146 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
147 {
148 if (reg == 15) {
149 uint32_t addr;
150 /* normally, since we updated PC, we need only to add one insn */
151 if (s->thumb)
152 addr = (long)s->pc + 2;
153 else
154 addr = (long)s->pc + 4;
155 tcg_gen_movi_i32(var, addr);
156 } else {
157 tcg_gen_mov_i32(var, cpu_R[reg]);
158 }
159 }
160
161 /* Create a new temporary and set it to the value of a CPU register. */
162 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
163 {
164 TCGv_i32 tmp = tcg_temp_new_i32();
165 load_reg_var(s, tmp, reg);
166 return tmp;
167 }
168
169 /* Set a CPU register. The source must be a temporary and will be
170 marked as dead. */
171 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
172 {
173 if (reg == 15) {
174 /* In Thumb mode, we must ignore bit 0.
175 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
176 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
177 * We choose to ignore [1:0] in ARM mode for all architecture versions.
178 */
179 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
180 s->is_jmp = DISAS_JUMP;
181 }
182 tcg_gen_mov_i32(cpu_R[reg], var);
183 tcg_temp_free_i32(var);
184 }
185
186 /* Value extensions. */
187 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
188 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
189 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
190 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
191
192 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
193 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
194
195
196 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
197 {
198 TCGv_i32 tmp_mask = tcg_const_i32(mask);
199 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
200 tcg_temp_free_i32(tmp_mask);
201 }
202 /* Set NZCV flags from the high 4 bits of var. */
203 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
204
205 static void gen_exception_internal(int excp)
206 {
207 TCGv_i32 tcg_excp = tcg_const_i32(excp);
208
209 assert(excp_is_internal(excp));
210 gen_helper_exception_internal(cpu_env, tcg_excp);
211 tcg_temp_free_i32(tcg_excp);
212 }
213
214 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
215 {
216 TCGv_i32 tcg_excp = tcg_const_i32(excp);
217 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
218 TCGv_i32 tcg_el = tcg_const_i32(target_el);
219
220 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
221 tcg_syn, tcg_el);
222
223 tcg_temp_free_i32(tcg_el);
224 tcg_temp_free_i32(tcg_syn);
225 tcg_temp_free_i32(tcg_excp);
226 }
227
228 static void gen_ss_advance(DisasContext *s)
229 {
230 /* If the singlestep state is Active-not-pending, advance to
231 * Active-pending.
232 */
233 if (s->ss_active) {
234 s->pstate_ss = 0;
235 gen_helper_clear_pstate_ss(cpu_env);
236 }
237 }
238
239 static void gen_step_complete_exception(DisasContext *s)
240 {
241 /* We just completed step of an insn. Move from Active-not-pending
242 * to Active-pending, and then also take the swstep exception.
243 * This corresponds to making the (IMPDEF) choice to prioritize
244 * swstep exceptions over asynchronous exceptions taken to an exception
245 * level where debug is disabled. This choice has the advantage that
246 * we do not need to maintain internal state corresponding to the
247 * ISV/EX syndrome bits between completion of the step and generation
248 * of the exception, and our syndrome information is always correct.
249 */
250 gen_ss_advance(s);
251 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
252 default_exception_el(s));
253 s->is_jmp = DISAS_EXC;
254 }
255
256 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
257 {
258 TCGv_i32 tmp1 = tcg_temp_new_i32();
259 TCGv_i32 tmp2 = tcg_temp_new_i32();
260 tcg_gen_ext16s_i32(tmp1, a);
261 tcg_gen_ext16s_i32(tmp2, b);
262 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
263 tcg_temp_free_i32(tmp2);
264 tcg_gen_sari_i32(a, a, 16);
265 tcg_gen_sari_i32(b, b, 16);
266 tcg_gen_mul_i32(b, b, a);
267 tcg_gen_mov_i32(a, tmp1);
268 tcg_temp_free_i32(tmp1);
269 }
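/*
 * A minimal host-side sketch of what the ops above compute: the two
 * signed 16x16->32 products used by the dual-multiply (SMUAD/SMUSD
 * style) paths. Name is illustrative only; relies just on <stdint.h>.
 */
static inline void smul_dual_example(uint32_t a, uint32_t b,
                                     int32_t *lo_prod, int32_t *hi_prod)
{
    *lo_prod = (int32_t)(int16_t)a * (int16_t)b;        /* low halves  */
    *hi_prod = ((int32_t)a >> 16) * ((int32_t)b >> 16); /* high halves */
}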
270
271 /* Byteswap each halfword. */
272 static void gen_rev16(TCGv_i32 var)
273 {
274 TCGv_i32 tmp = tcg_temp_new_i32();
275 tcg_gen_shri_i32(tmp, var, 8);
276 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
277 tcg_gen_shli_i32(var, var, 8);
278 tcg_gen_andi_i32(var, var, 0xff00ff00);
279 tcg_gen_or_i32(var, var, tmp);
280 tcg_temp_free_i32(tmp);
281 }
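/*
 * Plain-integer equivalent of the REV16 sequence generated above, as a
 * minimal sketch (illustrative name, needs only <stdint.h>): each 16-bit
 * half has its two bytes swapped independently.
 */
static inline uint32_t rev16_example(uint32_t x)
{
    return ((x << 8) & 0xff00ff00u) | ((x >> 8) & 0x00ff00ffu);
}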
282
283 /* Byteswap low halfword and sign extend. */
284 static void gen_revsh(TCGv_i32 var)
285 {
286 tcg_gen_ext16u_i32(var, var);
287 tcg_gen_bswap16_i32(var, var);
288 tcg_gen_ext16s_i32(var, var);
289 }
290
291 /* Unsigned bitfield extract. */
292 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
293 {
294 if (shift)
295 tcg_gen_shri_i32(var, var, shift);
296 tcg_gen_andi_i32(var, var, mask);
297 }
298
299 /* Signed bitfield extract. */
300 static void gen_sbfx(TCGv_i32 var, int shift, int width)
301 {
302 uint32_t signbit;
303
304 if (shift)
305 tcg_gen_sari_i32(var, var, shift);
306 if (shift + width < 32) {
307 signbit = 1u << (width - 1);
308 tcg_gen_andi_i32(var, var, (1u << width) - 1);
309 tcg_gen_xori_i32(var, var, signbit);
310 tcg_gen_subi_i32(var, var, signbit);
311 }
312 }
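/*
 * Sketch of the sign-extension trick used by gen_sbfx() above, assuming
 * 0 < width < 32: after shifting the field down and masking to 'width'
 * bits, XOR-ing with the sign bit and then subtracting it propagates
 * bit (width - 1) through the upper bits. Illustrative helper only.
 */
static inline int32_t sbfx_example(uint32_t x, int shift, int width)
{
    uint32_t field = (x >> shift) & ((1u << width) - 1);
    uint32_t signbit = 1u << (width - 1);

    return (int32_t)((field ^ signbit) - signbit);
}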
313
314 /* Return (b << 32) + a. Mark inputs as dead */
315 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
316 {
317 TCGv_i64 tmp64 = tcg_temp_new_i64();
318
319 tcg_gen_extu_i32_i64(tmp64, b);
320 tcg_temp_free_i32(b);
321 tcg_gen_shli_i64(tmp64, tmp64, 32);
322 tcg_gen_add_i64(a, tmp64, a);
323
324 tcg_temp_free_i64(tmp64);
325 return a;
326 }
327
328 /* Return (b << 32) - a. Mark inputs as dead. */
329 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
330 {
331 TCGv_i64 tmp64 = tcg_temp_new_i64();
332
333 tcg_gen_extu_i32_i64(tmp64, b);
334 tcg_temp_free_i32(b);
335 tcg_gen_shli_i64(tmp64, tmp64, 32);
336 tcg_gen_sub_i64(a, tmp64, a);
337
338 tcg_temp_free_i64(tmp64);
339 return a;
340 }
341
342 /* 32x32->64 multiply. Marks inputs as dead. */
343 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
344 {
345 TCGv_i32 lo = tcg_temp_new_i32();
346 TCGv_i32 hi = tcg_temp_new_i32();
347 TCGv_i64 ret;
348
349 tcg_gen_mulu2_i32(lo, hi, a, b);
350 tcg_temp_free_i32(a);
351 tcg_temp_free_i32(b);
352
353 ret = tcg_temp_new_i64();
354 tcg_gen_concat_i32_i64(ret, lo, hi);
355 tcg_temp_free_i32(lo);
356 tcg_temp_free_i32(hi);
357
358 return ret;
359 }
360
361 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
362 {
363 TCGv_i32 lo = tcg_temp_new_i32();
364 TCGv_i32 hi = tcg_temp_new_i32();
365 TCGv_i64 ret;
366
367 tcg_gen_muls2_i32(lo, hi, a, b);
368 tcg_temp_free_i32(a);
369 tcg_temp_free_i32(b);
370
371 ret = tcg_temp_new_i64();
372 tcg_gen_concat_i32_i64(ret, lo, hi);
373 tcg_temp_free_i32(lo);
374 tcg_temp_free_i32(hi);
375
376 return ret;
377 }
378
379 /* Swap low and high halfwords. */
380 static void gen_swap_half(TCGv_i32 var)
381 {
382 TCGv_i32 tmp = tcg_temp_new_i32();
383 tcg_gen_shri_i32(tmp, var, 16);
384 tcg_gen_shli_i32(var, var, 16);
385 tcg_gen_or_i32(var, var, tmp);
386 tcg_temp_free_i32(tmp);
387 }
388
389 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
390 tmp = (t0 ^ t1) & 0x8000;
391 t0 &= ~0x8000;
392 t1 &= ~0x8000;
393 t0 = (t0 + t1) ^ tmp;
394 */
395
396 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
397 {
398 TCGv_i32 tmp = tcg_temp_new_i32();
399 tcg_gen_xor_i32(tmp, t0, t1);
400 tcg_gen_andi_i32(tmp, tmp, 0x8000);
401 tcg_gen_andi_i32(t0, t0, ~0x8000);
402 tcg_gen_andi_i32(t1, t1, ~0x8000);
403 tcg_gen_add_i32(t0, t0, t1);
404 tcg_gen_xor_i32(t0, t0, tmp);
405 tcg_temp_free_i32(tmp);
406 tcg_temp_free_i32(t1);
407 }
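/*
 * The dual 16-bit add above in plain C, following the recipe in the
 * comment: clear bit 15 of both operands so the low-half addition cannot
 * carry into the high half, add, then restore the correct bit 15 using
 * the saved XOR. Minimal sketch only.
 */
static inline uint32_t add16_example(uint32_t t0, uint32_t t1)
{
    uint32_t cross = (t0 ^ t1) & 0x8000;
    uint32_t sum = (t0 & ~0x8000u) + (t1 & ~0x8000u);

    return sum ^ cross;
}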
408
409 /* Set CF to the top bit of var. */
410 static void gen_set_CF_bit31(TCGv_i32 var)
411 {
412 tcg_gen_shri_i32(cpu_CF, var, 31);
413 }
414
415 /* Set N and Z flags from var. */
416 static inline void gen_logic_CC(TCGv_i32 var)
417 {
418 tcg_gen_mov_i32(cpu_NF, var);
419 tcg_gen_mov_i32(cpu_ZF, var);
420 }
421
422 /* T0 += T1 + CF. */
423 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
424 {
425 tcg_gen_add_i32(t0, t0, t1);
426 tcg_gen_add_i32(t0, t0, cpu_CF);
427 }
428
429 /* dest = T0 + T1 + CF. */
430 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
431 {
432 tcg_gen_add_i32(dest, t0, t1);
433 tcg_gen_add_i32(dest, dest, cpu_CF);
434 }
435
436 /* dest = T0 - T1 + CF - 1. */
437 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
438 {
439 tcg_gen_sub_i32(dest, t0, t1);
440 tcg_gen_add_i32(dest, dest, cpu_CF);
441 tcg_gen_subi_i32(dest, dest, 1);
442 }
443
444 /* dest = T0 + T1. Compute C, N, V and Z flags */
445 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
446 {
447 TCGv_i32 tmp = tcg_temp_new_i32();
448 tcg_gen_movi_i32(tmp, 0);
449 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
450 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
451 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
452 tcg_gen_xor_i32(tmp, t0, t1);
453 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
454 tcg_temp_free_i32(tmp);
455 tcg_gen_mov_i32(dest, cpu_NF);
456 }
457
458 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
459 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
460 {
461 TCGv_i32 tmp = tcg_temp_new_i32();
462 if (TCG_TARGET_HAS_add2_i32) {
463 tcg_gen_movi_i32(tmp, 0);
464 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
465 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
466 } else {
467 TCGv_i64 q0 = tcg_temp_new_i64();
468 TCGv_i64 q1 = tcg_temp_new_i64();
469 tcg_gen_extu_i32_i64(q0, t0);
470 tcg_gen_extu_i32_i64(q1, t1);
471 tcg_gen_add_i64(q0, q0, q1);
472 tcg_gen_extu_i32_i64(q1, cpu_CF);
473 tcg_gen_add_i64(q0, q0, q1);
474 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
475 tcg_temp_free_i64(q0);
476 tcg_temp_free_i64(q1);
477 }
478 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
479 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
480 tcg_gen_xor_i32(tmp, t0, t1);
481 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
482 tcg_temp_free_i32(tmp);
483 tcg_gen_mov_i32(dest, cpu_NF);
484 }
485
486 /* dest = T0 - T1. Compute C, N, V and Z flags */
487 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
488 {
489 TCGv_i32 tmp;
490 tcg_gen_sub_i32(cpu_NF, t0, t1);
491 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
492 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
493 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
494 tmp = tcg_temp_new_i32();
495 tcg_gen_xor_i32(tmp, t0, t1);
496 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
497 tcg_temp_free_i32(tmp);
498 tcg_gen_mov_i32(dest, cpu_NF);
499 }
500
501 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
502 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
503 {
504 TCGv_i32 tmp = tcg_temp_new_i32();
505 tcg_gen_not_i32(tmp, t1);
506 gen_adc_CC(dest, t0, tmp);
507 tcg_temp_free_i32(tmp);
508 }
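/*
 * How the split flag representation above is produced, as a host-side
 * sketch (illustrative name): NF and ZF both hold the 32-bit result
 * (N = bit 31, Z = "value is zero"), CF is 0 or 1, and only bit 31 of
 * VF is meaningful. The ADC/SBC variants differ only in folding the
 * incoming carry into the sum.
 */
static inline void add_sub_flags_example(uint32_t a, uint32_t b, int is_sub,
                                         uint32_t *nf, uint32_t *zf,
                                         uint32_t *cf, uint32_t *vf)
{
    uint32_t res = is_sub ? a - b : a + b;

    *nf = *zf = res;                    /* N = bit 31, Z = (res == 0)   */
    *cf = is_sub ? (a >= b)             /* C = no borrow                */
                 : (res < a);           /* C = carry out of bit 31      */
    *vf = is_sub ? (res ^ a) & (a ^ b)  /* V = bit 31 of this           */
                 : (res ^ a) & ~(a ^ b);
}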
509
510 #define GEN_SHIFT(name) \
511 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
512 { \
513 TCGv_i32 tmp1, tmp2, tmp3; \
514 tmp1 = tcg_temp_new_i32(); \
515 tcg_gen_andi_i32(tmp1, t1, 0xff); \
516 tmp2 = tcg_const_i32(0); \
517 tmp3 = tcg_const_i32(0x1f); \
518 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
519 tcg_temp_free_i32(tmp3); \
520 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
521 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
522 tcg_temp_free_i32(tmp2); \
523 tcg_temp_free_i32(tmp1); \
524 }
525 GEN_SHIFT(shl)
526 GEN_SHIFT(shr)
527 #undef GEN_SHIFT
528
529 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
530 {
531 TCGv_i32 tmp1, tmp2;
532 tmp1 = tcg_temp_new_i32();
533 tcg_gen_andi_i32(tmp1, t1, 0xff);
534 tmp2 = tcg_const_i32(0x1f);
535 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
536 tcg_temp_free_i32(tmp2);
537 tcg_gen_sar_i32(dest, t0, tmp1);
538 tcg_temp_free_i32(tmp1);
539 }
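/*
 * Plain C sketch of the register-controlled shift semantics the helpers
 * above implement: only the low byte of the shift register is used,
 * LSL/LSR by 32 or more give 0, and ASR saturates at 31 (all sign bits).
 * Illustrative names; not used by the decoder.
 */
static inline uint32_t lsl_reg_example(uint32_t x, uint32_t s)
{
    s &= 0xff;
    return s >= 32 ? 0 : x << s;
}

static inline uint32_t lsr_reg_example(uint32_t x, uint32_t s)
{
    s &= 0xff;
    return s >= 32 ? 0 : x >> s;
}

static inline uint32_t asr_reg_example(uint32_t x, uint32_t s)
{
    s &= 0xff;
    return (uint32_t)((int32_t)x >> (s >= 32 ? 31 : s));
}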
540
541 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
542 {
543 TCGv_i32 c0 = tcg_const_i32(0);
544 TCGv_i32 tmp = tcg_temp_new_i32();
545 tcg_gen_neg_i32(tmp, src);
546 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
547 tcg_temp_free_i32(c0);
548 tcg_temp_free_i32(tmp);
549 }
550
551 static void shifter_out_im(TCGv_i32 var, int shift)
552 {
553 if (shift == 0) {
554 tcg_gen_andi_i32(cpu_CF, var, 1);
555 } else {
556 tcg_gen_shri_i32(cpu_CF, var, shift);
557 if (shift != 31) {
558 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
559 }
560 }
561 }
562
563 /* Shift by immediate. Includes special handling for shift == 0. */
564 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
565 int shift, int flags)
566 {
567 switch (shiftop) {
568 case 0: /* LSL */
569 if (shift != 0) {
570 if (flags)
571 shifter_out_im(var, 32 - shift);
572 tcg_gen_shli_i32(var, var, shift);
573 }
574 break;
575 case 1: /* LSR */
576 if (shift == 0) {
577 if (flags) {
578 tcg_gen_shri_i32(cpu_CF, var, 31);
579 }
580 tcg_gen_movi_i32(var, 0);
581 } else {
582 if (flags)
583 shifter_out_im(var, shift - 1);
584 tcg_gen_shri_i32(var, var, shift);
585 }
586 break;
587 case 2: /* ASR */
588 if (shift == 0)
589 shift = 32;
590 if (flags)
591 shifter_out_im(var, shift - 1);
592 if (shift == 32)
593 shift = 31;
594 tcg_gen_sari_i32(var, var, shift);
595 break;
596 case 3: /* ROR/RRX */
597 if (shift != 0) {
598 if (flags)
599 shifter_out_im(var, shift - 1);
600 tcg_gen_rotri_i32(var, var, shift); break;
601 } else {
602 TCGv_i32 tmp = tcg_temp_new_i32();
603 tcg_gen_shli_i32(tmp, cpu_CF, 31);
604 if (flags)
605 shifter_out_im(var, 0);
606 tcg_gen_shri_i32(var, var, 1);
607 tcg_gen_or_i32(var, var, tmp);
608 tcg_temp_free_i32(tmp);
609 }
610 }
611 };
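/*
 * Sketch of the immediate-shift encoding quirks handled above (value
 * only, flag updates omitted): an encoded shift of 0 means LSR #32 or
 * ASR #32 for those shift types, and ROR #0 is really RRX (rotate right
 * one bit through the carry flag, passed here as 0 or 1). Illustrative
 * helper only.
 */
static inline uint32_t shift_im_example(uint32_t x, int shiftop,
                                        int shift, uint32_t carry_in)
{
    switch (shiftop) {
    case 0: /* LSL */
        return x << shift;
    case 1: /* LSR, #0 encodes #32 */
        return shift ? x >> shift : 0;
    case 2: /* ASR, #0 encodes #32 */
        return (uint32_t)((int32_t)x >> (shift ? shift : 31));
    default: /* ROR, or RRX when shift == 0 */
        return shift ? (x >> shift) | (x << (32 - shift))
                     : (x >> 1) | (carry_in << 31);
    }
}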
612
613 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
614 TCGv_i32 shift, int flags)
615 {
616 if (flags) {
617 switch (shiftop) {
618 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
619 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
620 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
621 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
622 }
623 } else {
624 switch (shiftop) {
625 case 0:
626 gen_shl(var, var, shift);
627 break;
628 case 1:
629 gen_shr(var, var, shift);
630 break;
631 case 2:
632 gen_sar(var, var, shift);
633 break;
634 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
635 tcg_gen_rotr_i32(var, var, shift); break;
636 }
637 }
638 tcg_temp_free_i32(shift);
639 }
640
641 #define PAS_OP(pfx) \
642 switch (op2) { \
643 case 0: gen_pas_helper(glue(pfx,add16)); break; \
644 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
645 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
646 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
647 case 4: gen_pas_helper(glue(pfx,add8)); break; \
648 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
649 }
650 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
651 {
652 TCGv_ptr tmp;
653
654 switch (op1) {
655 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
656 case 1:
657 tmp = tcg_temp_new_ptr();
658 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
659 PAS_OP(s)
660 tcg_temp_free_ptr(tmp);
661 break;
662 case 5:
663 tmp = tcg_temp_new_ptr();
664 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
665 PAS_OP(u)
666 tcg_temp_free_ptr(tmp);
667 break;
668 #undef gen_pas_helper
669 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
670 case 2:
671 PAS_OP(q);
672 break;
673 case 3:
674 PAS_OP(sh);
675 break;
676 case 6:
677 PAS_OP(uq);
678 break;
679 case 7:
680 PAS_OP(uh);
681 break;
682 #undef gen_pas_helper
683 }
684 }
685 #undef PAS_OP
686
687 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
688 #define PAS_OP(pfx) \
689 switch (op1) { \
690 case 0: gen_pas_helper(glue(pfx,add8)); break; \
691 case 1: gen_pas_helper(glue(pfx,add16)); break; \
692 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
693 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
694 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
695 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
696 }
697 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
698 {
699 TCGv_ptr tmp;
700
701 switch (op2) {
702 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
703 case 0:
704 tmp = tcg_temp_new_ptr();
705 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
706 PAS_OP(s)
707 tcg_temp_free_ptr(tmp);
708 break;
709 case 4:
710 tmp = tcg_temp_new_ptr();
711 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
712 PAS_OP(u)
713 tcg_temp_free_ptr(tmp);
714 break;
715 #undef gen_pas_helper
716 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
717 case 1:
718 PAS_OP(q);
719 break;
720 case 2:
721 PAS_OP(sh);
722 break;
723 case 5:
724 PAS_OP(uq);
725 break;
726 case 6:
727 PAS_OP(uh);
728 break;
729 #undef gen_pas_helper
730 }
731 }
732 #undef PAS_OP
733
734 /*
735 * Generate a conditional based on ARM condition code cc.
736 * This is common between ARM and Aarch64 targets.
737 */
738 void arm_test_cc(DisasCompare *cmp, int cc)
739 {
740 TCGv_i32 value;
741 TCGCond cond;
742 bool global = true;
743
744 switch (cc) {
745 case 0: /* eq: Z */
746 case 1: /* ne: !Z */
747 cond = TCG_COND_EQ;
748 value = cpu_ZF;
749 break;
750
751 case 2: /* cs: C */
752 case 3: /* cc: !C */
753 cond = TCG_COND_NE;
754 value = cpu_CF;
755 break;
756
757 case 4: /* mi: N */
758 case 5: /* pl: !N */
759 cond = TCG_COND_LT;
760 value = cpu_NF;
761 break;
762
763 case 6: /* vs: V */
764 case 7: /* vc: !V */
765 cond = TCG_COND_LT;
766 value = cpu_VF;
767 break;
768
769 case 8: /* hi: C && !Z */
770 case 9: /* ls: !C || Z -> !(C && !Z) */
771 cond = TCG_COND_NE;
772 value = tcg_temp_new_i32();
773 global = false;
774 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
775 ZF is non-zero for !Z; so AND the two subexpressions. */
776 tcg_gen_neg_i32(value, cpu_CF);
777 tcg_gen_and_i32(value, value, cpu_ZF);
778 break;
779
780 case 10: /* ge: N == V -> N ^ V == 0 */
781 case 11: /* lt: N != V -> N ^ V != 0 */
782 /* Since we're only interested in the sign bit, == 0 is >= 0. */
783 cond = TCG_COND_GE;
784 value = tcg_temp_new_i32();
785 global = false;
786 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
787 break;
788
789 case 12: /* gt: !Z && N == V */
790 case 13: /* le: Z || N != V */
791 cond = TCG_COND_NE;
792 value = tcg_temp_new_i32();
793 global = false;
794 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
795 * the sign bit then AND with ZF to yield the result. */
796 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
797 tcg_gen_sari_i32(value, value, 31);
798 tcg_gen_andc_i32(value, cpu_ZF, value);
799 break;
800
801 case 14: /* always */
802 case 15: /* always */
803 /* Use the ALWAYS condition, which will fold early.
804 * It doesn't matter what we use for the value. */
805 cond = TCG_COND_ALWAYS;
806 value = cpu_ZF;
807 goto no_invert;
808
809 default:
810 fprintf(stderr, "Bad condition code 0x%x\n", cc);
811 abort();
812 }
813
814 if (cc & 1) {
815 cond = tcg_invert_cond(cond);
816 }
817
818 no_invert:
819 cmp->cond = cond;
820 cmp->value = value;
821 cmp->value_global = global;
822 }
823
824 void arm_free_cc(DisasCompare *cmp)
825 {
826 if (!cmp->value_global) {
827 tcg_temp_free_i32(cmp->value);
828 }
829 }
830
831 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
832 {
833 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
834 }
835
836 void arm_gen_test_cc(int cc, TCGLabel *label)
837 {
838 DisasCompare cmp;
839 arm_test_cc(&cmp, cc);
840 arm_jump_cc(&cmp, label);
841 arm_free_cc(&cmp);
842 }
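/*
 * For reference, a minimal sketch of evaluating a condition code from
 * the flag representation used throughout this file: NF carries N in
 * bit 31, ZF is zero exactly when Z is set, CF is 0 or 1, and VF carries
 * V in bit 31. Illustrative helper, not called by the translator.
 */
static inline int arm_cond_holds_example(int cc, uint32_t nf, uint32_t zf,
                                         uint32_t cf, uint32_t vf)
{
    int n = nf >> 31, z = (zf == 0), c = (cf != 0), v = vf >> 31;
    int r;

    switch (cc >> 1) {
    case 0: r = z; break;              /* EQ / NE */
    case 1: r = c; break;              /* CS / CC */
    case 2: r = n; break;              /* MI / PL */
    case 3: r = v; break;              /* VS / VC */
    case 4: r = c && !z; break;        /* HI / LS */
    case 5: r = (n == v); break;       /* GE / LT */
    case 6: r = !z && (n == v); break; /* GT / LE */
    default: return 1;                 /* AL      */
    }
    return (cc & 1) ? !r : r;
}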
843
844 static const uint8_t table_logic_cc[16] = {
845 1, /* and */
846 1, /* xor */
847 0, /* sub */
848 0, /* rsb */
849 0, /* add */
850 0, /* adc */
851 0, /* sbc */
852 0, /* rsc */
853 1, /* andl */
854 1, /* xorl */
855 0, /* cmp */
856 0, /* cmn */
857 1, /* orr */
858 1, /* mov */
859 1, /* bic */
860 1, /* mvn */
861 };
862
863 /* Set PC and Thumb state from an immediate address. */
864 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
865 {
866 TCGv_i32 tmp;
867
868 s->is_jmp = DISAS_JUMP;
869 if (s->thumb != (addr & 1)) {
870 tmp = tcg_temp_new_i32();
871 tcg_gen_movi_i32(tmp, addr & 1);
872 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
873 tcg_temp_free_i32(tmp);
874 }
875 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
876 }
877
878 /* Set PC and Thumb state from var. var is marked as dead. */
879 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
880 {
881 s->is_jmp = DISAS_JUMP;
882 tcg_gen_andi_i32(cpu_R[15], var, ~1);
883 tcg_gen_andi_i32(var, var, 1);
884 store_cpu_field(var, thumb);
885 }
886
887 /* Variant of store_reg which uses branch&exchange logic when storing
888 to r15 in ARM architecture v7 and above. The source must be a temporary
889 and will be marked as dead. */
890 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
891 {
892 if (reg == 15 && ENABLE_ARCH_7) {
893 gen_bx(s, var);
894 } else {
895 store_reg(s, reg, var);
896 }
897 }
898
899 /* Variant of store_reg which uses branch&exchange logic when storing
900 * to r15 in ARM architecture v5T and above. This is used for storing
901 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
902 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
903 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
904 {
905 if (reg == 15 && ENABLE_ARCH_5) {
906 gen_bx(s, var);
907 } else {
908 store_reg(s, reg, var);
909 }
910 }
911
912 #ifdef CONFIG_USER_ONLY
913 #define IS_USER_ONLY 1
914 #else
915 #define IS_USER_ONLY 0
916 #endif
917
918 /* Abstractions of "generate code to do a guest load/store for
919 * AArch32", where a vaddr is always 32 bits (and is zero
920 * extended if we're a 64 bit core) and data is also
921 * 32 bits unless specifically doing a 64 bit access.
922 * These functions work like tcg_gen_qemu_{ld,st}* except
923 * that the address argument is TCGv_i32 rather than TCGv.
924 */
925
926 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
927 {
928 TCGv addr = tcg_temp_new();
929 tcg_gen_extu_i32_tl(addr, a32);
930
931 /* Not needed for user-mode BE32, where we use MO_BE instead. */
932 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
933 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
934 }
935 return addr;
936 }
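/*
 * The BE32 address adjustment above in isolation, as a sketch: with
 * SCTLR.B set, sub-word accesses flip the low address bits so that byte
 * and halfword data land where a big-endian word layout expects them.
 */
static inline uint32_t be32_xor_addr_example(uint32_t addr, unsigned size_log2)
{
    /* size_log2: 0 = byte (XOR 3), 1 = halfword (XOR 2), 2+ = unchanged */
    return size_log2 < 2 ? addr ^ (4u - (1u << size_log2)) : addr;
}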
937
938 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
939 int index, TCGMemOp opc)
940 {
941 TCGv addr = gen_aa32_addr(s, a32, opc);
942 tcg_gen_qemu_ld_i32(val, addr, index, opc);
943 tcg_temp_free(addr);
944 }
945
946 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
947 int index, TCGMemOp opc)
948 {
949 TCGv addr = gen_aa32_addr(s, a32, opc);
950 tcg_gen_qemu_st_i32(val, addr, index, opc);
951 tcg_temp_free(addr);
952 }
953
954 #define DO_GEN_LD(SUFF, OPC) \
955 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
956 TCGv_i32 a32, int index) \
957 { \
958 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
959 }
960
961 #define DO_GEN_ST(SUFF, OPC) \
962 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
963 TCGv_i32 a32, int index) \
964 { \
965 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
966 }
967
968 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
969 {
970 /* Not needed for user-mode BE32, where we use MO_BE instead. */
971 if (!IS_USER_ONLY && s->sctlr_b) {
972 tcg_gen_rotri_i64(val, val, 32);
973 }
974 }
975
976 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
977 int index, TCGMemOp opc)
978 {
979 TCGv addr = gen_aa32_addr(s, a32, opc);
980 tcg_gen_qemu_ld_i64(val, addr, index, opc);
981 gen_aa32_frob64(s, val);
982 tcg_temp_free(addr);
983 }
984
985 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
986 TCGv_i32 a32, int index)
987 {
988 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
989 }
990
991 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
992 int index, TCGMemOp opc)
993 {
994 TCGv addr = gen_aa32_addr(s, a32, opc);
995
996 /* Not needed for user-mode BE32, where we use MO_BE instead. */
997 if (!IS_USER_ONLY && s->sctlr_b) {
998 TCGv_i64 tmp = tcg_temp_new_i64();
999 tcg_gen_rotri_i64(tmp, val, 32);
1000 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1001 tcg_temp_free_i64(tmp);
1002 } else {
1003 tcg_gen_qemu_st_i64(val, addr, index, opc);
1004 }
1005 tcg_temp_free(addr);
1006 }
1007
1008 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1009 TCGv_i32 a32, int index)
1010 {
1011 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1012 }
1013
1014 DO_GEN_LD(8s, MO_SB)
1015 DO_GEN_LD(8u, MO_UB)
1016 DO_GEN_LD(16s, MO_SW)
1017 DO_GEN_LD(16u, MO_UW)
1018 DO_GEN_LD(32u, MO_UL)
1019 DO_GEN_ST(8, MO_UB)
1020 DO_GEN_ST(16, MO_UW)
1021 DO_GEN_ST(32, MO_UL)
1022
1023 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
1024 {
1025 tcg_gen_movi_i32(cpu_R[15], val);
1026 }
1027
1028 static inline void gen_hvc(DisasContext *s, int imm16)
1029 {
1030 /* The pre HVC helper handles cases when HVC gets trapped
1031 * as an undefined insn by runtime configuration (ie before
1032 * the insn really executes).
1033 */
1034 gen_set_pc_im(s, s->pc - 4);
1035 gen_helper_pre_hvc(cpu_env);
1036 /* Otherwise we will treat this as a real exception which
1037 * happens after execution of the insn. (The distinction matters
1038 * for the PC value reported to the exception handler and also
1039 * for single stepping.)
1040 */
1041 s->svc_imm = imm16;
1042 gen_set_pc_im(s, s->pc);
1043 s->is_jmp = DISAS_HVC;
1044 }
1045
1046 static inline void gen_smc(DisasContext *s)
1047 {
1048 /* As with HVC, we may take an exception either before or after
1049 * the insn executes.
1050 */
1051 TCGv_i32 tmp;
1052
1053 gen_set_pc_im(s, s->pc - 4);
1054 tmp = tcg_const_i32(syn_aa32_smc());
1055 gen_helper_pre_smc(cpu_env, tmp);
1056 tcg_temp_free_i32(tmp);
1057 gen_set_pc_im(s, s->pc);
1058 s->is_jmp = DISAS_SMC;
1059 }
1060
1061 static inline void
1062 gen_set_condexec (DisasContext *s)
1063 {
1064 if (s->condexec_mask) {
1065 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1066 TCGv_i32 tmp = tcg_temp_new_i32();
1067 tcg_gen_movi_i32(tmp, val);
1068 store_cpu_field(tmp, condexec_bits);
1069 }
1070 }
1071
1072 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1073 {
1074 gen_set_condexec(s);
1075 gen_set_pc_im(s, s->pc - offset);
1076 gen_exception_internal(excp);
1077 s->is_jmp = DISAS_JUMP;
1078 }
1079
1080 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1081 int syn, uint32_t target_el)
1082 {
1083 gen_set_condexec(s);
1084 gen_set_pc_im(s, s->pc - offset);
1085 gen_exception(excp, syn, target_el);
1086 s->is_jmp = DISAS_JUMP;
1087 }
1088
1089 /* Force a TB lookup after an instruction that changes the CPU state. */
1090 static inline void gen_lookup_tb(DisasContext *s)
1091 {
1092 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1093 s->is_jmp = DISAS_JUMP;
1094 }
1095
1096 static inline void gen_hlt(DisasContext *s, int imm)
1097 {
1098 /* HLT. This has two purposes.
1099 * Architecturally, it is an external halting debug instruction.
1100 * Since QEMU doesn't implement external debug, we treat this as
1101 * it is required for halting debug disabled: it will UNDEF.
1102 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1103 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1104 * must trigger semihosting even for ARMv7 and earlier, where
1105 * HLT was an undefined encoding.
1106 * In system mode, we don't allow userspace access to
1107 * semihosting, to provide some semblance of security
1108 * (and for consistency with our 32-bit semihosting).
1109 */
1110 if (semihosting_enabled() &&
1111 #ifndef CONFIG_USER_ONLY
1112 s->current_el != 0 &&
1113 #endif
1114 (imm == (s->thumb ? 0x3c : 0xf000))) {
1115 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1116 return;
1117 }
1118
1119 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1120 default_exception_el(s));
1121 }
1122
1123 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1124 TCGv_i32 var)
1125 {
1126 int val, rm, shift, shiftop;
1127 TCGv_i32 offset;
1128
1129 if (!(insn & (1 << 25))) {
1130 /* immediate */
1131 val = insn & 0xfff;
1132 if (!(insn & (1 << 23)))
1133 val = -val;
1134 if (val != 0)
1135 tcg_gen_addi_i32(var, var, val);
1136 } else {
1137 /* shift/register */
1138 rm = (insn) & 0xf;
1139 shift = (insn >> 7) & 0x1f;
1140 shiftop = (insn >> 5) & 3;
1141 offset = load_reg(s, rm);
1142 gen_arm_shift_im(offset, shiftop, shift, 0);
1143 if (!(insn & (1 << 23)))
1144 tcg_gen_sub_i32(var, var, offset);
1145 else
1146 tcg_gen_add_i32(var, var, offset);
1147 tcg_temp_free_i32(offset);
1148 }
1149 }
1150
1151 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1152 int extra, TCGv_i32 var)
1153 {
1154 int val, rm;
1155 TCGv_i32 offset;
1156
1157 if (insn & (1 << 22)) {
1158 /* immediate */
1159 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1160 if (!(insn & (1 << 23)))
1161 val = -val;
1162 val += extra;
1163 if (val != 0)
1164 tcg_gen_addi_i32(var, var, val);
1165 } else {
1166 /* register */
1167 if (extra)
1168 tcg_gen_addi_i32(var, var, extra);
1169 rm = (insn) & 0xf;
1170 offset = load_reg(s, rm);
1171 if (!(insn & (1 << 23)))
1172 tcg_gen_sub_i32(var, var, offset);
1173 else
1174 tcg_gen_add_i32(var, var, offset);
1175 tcg_temp_free_i32(offset);
1176 }
1177 }
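/*
 * Sketch of the immediate offsets the two helpers above decode (the
 * register forms are omitted): word/byte transfers carry a 12-bit
 * immediate in bits [11:0]; halfword/doubleword transfers split a byte
 * immediate across bits [3:0] and [11:8]. Bit 23 (the U bit) selects
 * add vs subtract. Names are illustrative only.
 */
static inline int32_t a32_ldst_imm12_example(uint32_t insn)
{
    int32_t off = insn & 0xfff;
    return (insn & (1 << 23)) ? off : -off;
}

static inline int32_t a32_ldsth_imm8_example(uint32_t insn)
{
    int32_t off = (insn & 0xf) | ((insn >> 4) & 0xf0);
    return (insn & (1 << 23)) ? off : -off;
}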
1178
1179 static TCGv_ptr get_fpstatus_ptr(int neon)
1180 {
1181 TCGv_ptr statusptr = tcg_temp_new_ptr();
1182 int offset;
1183 if (neon) {
1184 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1185 } else {
1186 offset = offsetof(CPUARMState, vfp.fp_status);
1187 }
1188 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1189 return statusptr;
1190 }
1191
1192 #define VFP_OP2(name) \
1193 static inline void gen_vfp_##name(int dp) \
1194 { \
1195 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1196 if (dp) { \
1197 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1198 } else { \
1199 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1200 } \
1201 tcg_temp_free_ptr(fpst); \
1202 }
1203
1204 VFP_OP2(add)
1205 VFP_OP2(sub)
1206 VFP_OP2(mul)
1207 VFP_OP2(div)
1208
1209 #undef VFP_OP2
1210
1211 static inline void gen_vfp_F1_mul(int dp)
1212 {
1213 /* Like gen_vfp_mul() but put result in F1 */
1214 TCGv_ptr fpst = get_fpstatus_ptr(0);
1215 if (dp) {
1216 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1217 } else {
1218 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1219 }
1220 tcg_temp_free_ptr(fpst);
1221 }
1222
1223 static inline void gen_vfp_F1_neg(int dp)
1224 {
1225 /* Like gen_vfp_neg() but put result in F1 */
1226 if (dp) {
1227 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1228 } else {
1229 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1230 }
1231 }
1232
1233 static inline void gen_vfp_abs(int dp)
1234 {
1235 if (dp)
1236 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1237 else
1238 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1239 }
1240
1241 static inline void gen_vfp_neg(int dp)
1242 {
1243 if (dp)
1244 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1245 else
1246 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1247 }
1248
1249 static inline void gen_vfp_sqrt(int dp)
1250 {
1251 if (dp)
1252 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1253 else
1254 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1255 }
1256
1257 static inline void gen_vfp_cmp(int dp)
1258 {
1259 if (dp)
1260 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1261 else
1262 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1263 }
1264
1265 static inline void gen_vfp_cmpe(int dp)
1266 {
1267 if (dp)
1268 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1269 else
1270 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1271 }
1272
1273 static inline void gen_vfp_F1_ld0(int dp)
1274 {
1275 if (dp)
1276 tcg_gen_movi_i64(cpu_F1d, 0);
1277 else
1278 tcg_gen_movi_i32(cpu_F1s, 0);
1279 }
1280
1281 #define VFP_GEN_ITOF(name) \
1282 static inline void gen_vfp_##name(int dp, int neon) \
1283 { \
1284 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1285 if (dp) { \
1286 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1287 } else { \
1288 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1289 } \
1290 tcg_temp_free_ptr(statusptr); \
1291 }
1292
1293 VFP_GEN_ITOF(uito)
1294 VFP_GEN_ITOF(sito)
1295 #undef VFP_GEN_ITOF
1296
1297 #define VFP_GEN_FTOI(name) \
1298 static inline void gen_vfp_##name(int dp, int neon) \
1299 { \
1300 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1301 if (dp) { \
1302 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1303 } else { \
1304 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1305 } \
1306 tcg_temp_free_ptr(statusptr); \
1307 }
1308
1309 VFP_GEN_FTOI(toui)
1310 VFP_GEN_FTOI(touiz)
1311 VFP_GEN_FTOI(tosi)
1312 VFP_GEN_FTOI(tosiz)
1313 #undef VFP_GEN_FTOI
1314
1315 #define VFP_GEN_FIX(name, round) \
1316 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1317 { \
1318 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1319 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1320 if (dp) { \
1321 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1322 statusptr); \
1323 } else { \
1324 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1325 statusptr); \
1326 } \
1327 tcg_temp_free_i32(tmp_shift); \
1328 tcg_temp_free_ptr(statusptr); \
1329 }
1330 VFP_GEN_FIX(tosh, _round_to_zero)
1331 VFP_GEN_FIX(tosl, _round_to_zero)
1332 VFP_GEN_FIX(touh, _round_to_zero)
1333 VFP_GEN_FIX(toul, _round_to_zero)
1334 VFP_GEN_FIX(shto, )
1335 VFP_GEN_FIX(slto, )
1336 VFP_GEN_FIX(uhto, )
1337 VFP_GEN_FIX(ulto, )
1338 #undef VFP_GEN_FIX
1339
1340 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1341 {
1342 if (dp) {
1343 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1344 } else {
1345 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1346 }
1347 }
1348
1349 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1350 {
1351 if (dp) {
1352 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1353 } else {
1354 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1355 }
1356 }
1357
1358 static inline long
1359 vfp_reg_offset (int dp, int reg)
1360 {
1361 if (dp)
1362 return offsetof(CPUARMState, vfp.regs[reg]);
1363 else if (reg & 1) {
1364 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1365 + offsetof(CPU_DoubleU, l.upper);
1366 } else {
1367 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1368 + offsetof(CPU_DoubleU, l.lower);
1369 }
1370 }
1371
1372 /* Return the offset of a 32-bit piece of a NEON register.
1373 zero is the least significant end of the register. */
1374 static inline long
1375 neon_reg_offset (int reg, int n)
1376 {
1377 int sreg;
1378 sreg = reg * 2 + n;
1379 return vfp_reg_offset(0, sreg);
1380 }
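/*
 * Register-file layout assumed by the two helpers above, as a sketch:
 * vfp.regs[] is an array of 64-bit D registers, single-precision
 * register S<i> aliases half of D<i/2>, and 32-bit piece 'n' of NEON
 * register D<reg> is S<2*reg + n>. The byte offset below assumes a
 * little-endian host; the real code goes through CPU_DoubleU so it is
 * host-endian safe. Illustrative helper only.
 */
static inline size_t vfp_sreg_byte_offset_example(int sreg)
{
    return (size_t)(sreg >> 1) * 8 + (size_t)(sreg & 1) * 4;
}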
1381
1382 static TCGv_i32 neon_load_reg(int reg, int pass)
1383 {
1384 TCGv_i32 tmp = tcg_temp_new_i32();
1385 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1386 return tmp;
1387 }
1388
1389 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1390 {
1391 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1392 tcg_temp_free_i32(var);
1393 }
1394
1395 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1396 {
1397 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1398 }
1399
1400 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1401 {
1402 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1403 }
1404
1405 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1406 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1407 #define tcg_gen_st_f32 tcg_gen_st_i32
1408 #define tcg_gen_st_f64 tcg_gen_st_i64
1409
1410 static inline void gen_mov_F0_vreg(int dp, int reg)
1411 {
1412 if (dp)
1413 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1414 else
1415 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1416 }
1417
1418 static inline void gen_mov_F1_vreg(int dp, int reg)
1419 {
1420 if (dp)
1421 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1422 else
1423 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1424 }
1425
1426 static inline void gen_mov_vreg_F0(int dp, int reg)
1427 {
1428 if (dp)
1429 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1430 else
1431 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1432 }
1433
1434 #define ARM_CP_RW_BIT (1 << 20)
1435
1436 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1437 {
1438 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1439 }
1440
1441 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1442 {
1443 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1444 }
1445
1446 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1447 {
1448 TCGv_i32 var = tcg_temp_new_i32();
1449 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1450 return var;
1451 }
1452
1453 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1454 {
1455 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1456 tcg_temp_free_i32(var);
1457 }
1458
1459 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1460 {
1461 iwmmxt_store_reg(cpu_M0, rn);
1462 }
1463
1464 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1465 {
1466 iwmmxt_load_reg(cpu_M0, rn);
1467 }
1468
1469 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1470 {
1471 iwmmxt_load_reg(cpu_V1, rn);
1472 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1473 }
1474
1475 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1476 {
1477 iwmmxt_load_reg(cpu_V1, rn);
1478 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1479 }
1480
1481 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1482 {
1483 iwmmxt_load_reg(cpu_V1, rn);
1484 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1485 }
1486
1487 #define IWMMXT_OP(name) \
1488 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1489 { \
1490 iwmmxt_load_reg(cpu_V1, rn); \
1491 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1492 }
1493
1494 #define IWMMXT_OP_ENV(name) \
1495 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1496 { \
1497 iwmmxt_load_reg(cpu_V1, rn); \
1498 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1499 }
1500
1501 #define IWMMXT_OP_ENV_SIZE(name) \
1502 IWMMXT_OP_ENV(name##b) \
1503 IWMMXT_OP_ENV(name##w) \
1504 IWMMXT_OP_ENV(name##l)
1505
1506 #define IWMMXT_OP_ENV1(name) \
1507 static inline void gen_op_iwmmxt_##name##_M0(void) \
1508 { \
1509 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1510 }
1511
1512 IWMMXT_OP(maddsq)
1513 IWMMXT_OP(madduq)
1514 IWMMXT_OP(sadb)
1515 IWMMXT_OP(sadw)
1516 IWMMXT_OP(mulslw)
1517 IWMMXT_OP(mulshw)
1518 IWMMXT_OP(mululw)
1519 IWMMXT_OP(muluhw)
1520 IWMMXT_OP(macsw)
1521 IWMMXT_OP(macuw)
1522
1523 IWMMXT_OP_ENV_SIZE(unpackl)
1524 IWMMXT_OP_ENV_SIZE(unpackh)
1525
1526 IWMMXT_OP_ENV1(unpacklub)
1527 IWMMXT_OP_ENV1(unpackluw)
1528 IWMMXT_OP_ENV1(unpacklul)
1529 IWMMXT_OP_ENV1(unpackhub)
1530 IWMMXT_OP_ENV1(unpackhuw)
1531 IWMMXT_OP_ENV1(unpackhul)
1532 IWMMXT_OP_ENV1(unpacklsb)
1533 IWMMXT_OP_ENV1(unpacklsw)
1534 IWMMXT_OP_ENV1(unpacklsl)
1535 IWMMXT_OP_ENV1(unpackhsb)
1536 IWMMXT_OP_ENV1(unpackhsw)
1537 IWMMXT_OP_ENV1(unpackhsl)
1538
1539 IWMMXT_OP_ENV_SIZE(cmpeq)
1540 IWMMXT_OP_ENV_SIZE(cmpgtu)
1541 IWMMXT_OP_ENV_SIZE(cmpgts)
1542
1543 IWMMXT_OP_ENV_SIZE(mins)
1544 IWMMXT_OP_ENV_SIZE(minu)
1545 IWMMXT_OP_ENV_SIZE(maxs)
1546 IWMMXT_OP_ENV_SIZE(maxu)
1547
1548 IWMMXT_OP_ENV_SIZE(subn)
1549 IWMMXT_OP_ENV_SIZE(addn)
1550 IWMMXT_OP_ENV_SIZE(subu)
1551 IWMMXT_OP_ENV_SIZE(addu)
1552 IWMMXT_OP_ENV_SIZE(subs)
1553 IWMMXT_OP_ENV_SIZE(adds)
1554
1555 IWMMXT_OP_ENV(avgb0)
1556 IWMMXT_OP_ENV(avgb1)
1557 IWMMXT_OP_ENV(avgw0)
1558 IWMMXT_OP_ENV(avgw1)
1559
1560 IWMMXT_OP_ENV(packuw)
1561 IWMMXT_OP_ENV(packul)
1562 IWMMXT_OP_ENV(packuq)
1563 IWMMXT_OP_ENV(packsw)
1564 IWMMXT_OP_ENV(packsl)
1565 IWMMXT_OP_ENV(packsq)
1566
1567 static void gen_op_iwmmxt_set_mup(void)
1568 {
1569 TCGv_i32 tmp;
1570 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1571 tcg_gen_ori_i32(tmp, tmp, 2);
1572 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1573 }
1574
1575 static void gen_op_iwmmxt_set_cup(void)
1576 {
1577 TCGv_i32 tmp;
1578 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1579 tcg_gen_ori_i32(tmp, tmp, 1);
1580 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1581 }
1582
1583 static void gen_op_iwmmxt_setpsr_nz(void)
1584 {
1585 TCGv_i32 tmp = tcg_temp_new_i32();
1586 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1587 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1588 }
1589
1590 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1591 {
1592 iwmmxt_load_reg(cpu_V1, rn);
1593 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1594 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1595 }
1596
1597 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1598 TCGv_i32 dest)
1599 {
1600 int rd;
1601 uint32_t offset;
1602 TCGv_i32 tmp;
1603
1604 rd = (insn >> 16) & 0xf;
1605 tmp = load_reg(s, rd);
1606
1607 offset = (insn & 0xff) << ((insn >> 7) & 2);
1608 if (insn & (1 << 24)) {
1609 /* Pre indexed */
1610 if (insn & (1 << 23))
1611 tcg_gen_addi_i32(tmp, tmp, offset);
1612 else
1613 tcg_gen_addi_i32(tmp, tmp, -offset);
1614 tcg_gen_mov_i32(dest, tmp);
1615 if (insn & (1 << 21))
1616 store_reg(s, rd, tmp);
1617 else
1618 tcg_temp_free_i32(tmp);
1619 } else if (insn & (1 << 21)) {
1620 /* Post indexed */
1621 tcg_gen_mov_i32(dest, tmp);
1622 if (insn & (1 << 23))
1623 tcg_gen_addi_i32(tmp, tmp, offset);
1624 else
1625 tcg_gen_addi_i32(tmp, tmp, -offset);
1626 store_reg(s, rd, tmp);
1627 } else if (!(insn & (1 << 23)))
1628 return 1;
1629 return 0;
1630 }
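/*
 * Sketch of the iwMMXt transfer offset decoded above: an 8-bit immediate
 * scaled by 4 when bit 8 of the instruction is set ((insn >> 7) & 2 is
 * then 2, i.e. a left shift by two). Bit 23 gives the direction, bit 24
 * selects pre- vs post-indexing, and bit 21 requests base writeback.
 * Illustrative helper only.
 */
static inline uint32_t iwmmxt_offset_example(uint32_t insn)
{
    return (insn & 0xff) << ((insn >> 7) & 2);
}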
1631
1632 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1633 {
1634 int rd = (insn >> 0) & 0xf;
1635 TCGv_i32 tmp;
1636
1637 if (insn & (1 << 8)) {
1638 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1639 return 1;
1640 } else {
1641 tmp = iwmmxt_load_creg(rd);
1642 }
1643 } else {
1644 tmp = tcg_temp_new_i32();
1645 iwmmxt_load_reg(cpu_V0, rd);
1646 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1647 }
1648 tcg_gen_andi_i32(tmp, tmp, mask);
1649 tcg_gen_mov_i32(dest, tmp);
1650 tcg_temp_free_i32(tmp);
1651 return 0;
1652 }
1653
1654 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1655 (ie. an undefined instruction). */
1656 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1657 {
1658 int rd, wrd;
1659 int rdhi, rdlo, rd0, rd1, i;
1660 TCGv_i32 addr;
1661 TCGv_i32 tmp, tmp2, tmp3;
1662
1663 if ((insn & 0x0e000e00) == 0x0c000000) {
1664 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1665 wrd = insn & 0xf;
1666 rdlo = (insn >> 12) & 0xf;
1667 rdhi = (insn >> 16) & 0xf;
1668 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1669 iwmmxt_load_reg(cpu_V0, wrd);
1670 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1671 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1672 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1673 } else { /* TMCRR */
1674 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1675 iwmmxt_store_reg(cpu_V0, wrd);
1676 gen_op_iwmmxt_set_mup();
1677 }
1678 return 0;
1679 }
1680
1681 wrd = (insn >> 12) & 0xf;
1682 addr = tcg_temp_new_i32();
1683 if (gen_iwmmxt_address(s, insn, addr)) {
1684 tcg_temp_free_i32(addr);
1685 return 1;
1686 }
1687 if (insn & ARM_CP_RW_BIT) {
1688 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1689 tmp = tcg_temp_new_i32();
1690 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1691 iwmmxt_store_creg(wrd, tmp);
1692 } else {
1693 i = 1;
1694 if (insn & (1 << 8)) {
1695 if (insn & (1 << 22)) { /* WLDRD */
1696 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1697 i = 0;
1698 } else { /* WLDRW wRd */
1699 tmp = tcg_temp_new_i32();
1700 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1701 }
1702 } else {
1703 tmp = tcg_temp_new_i32();
1704 if (insn & (1 << 22)) { /* WLDRH */
1705 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1706 } else { /* WLDRB */
1707 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1708 }
1709 }
1710 if (i) {
1711 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1712 tcg_temp_free_i32(tmp);
1713 }
1714 gen_op_iwmmxt_movq_wRn_M0(wrd);
1715 }
1716 } else {
1717 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1718 tmp = iwmmxt_load_creg(wrd);
1719 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1720 } else {
1721 gen_op_iwmmxt_movq_M0_wRn(wrd);
1722 tmp = tcg_temp_new_i32();
1723 if (insn & (1 << 8)) {
1724 if (insn & (1 << 22)) { /* WSTRD */
1725 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1726 } else { /* WSTRW wRd */
1727 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1728 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1729 }
1730 } else {
1731 if (insn & (1 << 22)) { /* WSTRH */
1732 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1733 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1734 } else { /* WSTRB */
1735 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1736 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1737 }
1738 }
1739 }
1740 tcg_temp_free_i32(tmp);
1741 }
1742 tcg_temp_free_i32(addr);
1743 return 0;
1744 }
1745
1746 if ((insn & 0x0f000000) != 0x0e000000)
1747 return 1;
1748
1749 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1750 case 0x000: /* WOR */
1751 wrd = (insn >> 12) & 0xf;
1752 rd0 = (insn >> 0) & 0xf;
1753 rd1 = (insn >> 16) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
1755 gen_op_iwmmxt_orq_M0_wRn(rd1);
1756 gen_op_iwmmxt_setpsr_nz();
1757 gen_op_iwmmxt_movq_wRn_M0(wrd);
1758 gen_op_iwmmxt_set_mup();
1759 gen_op_iwmmxt_set_cup();
1760 break;
1761 case 0x011: /* TMCR */
1762 if (insn & 0xf)
1763 return 1;
1764 rd = (insn >> 12) & 0xf;
1765 wrd = (insn >> 16) & 0xf;
1766 switch (wrd) {
1767 case ARM_IWMMXT_wCID:
1768 case ARM_IWMMXT_wCASF:
1769 break;
1770 case ARM_IWMMXT_wCon:
1771 gen_op_iwmmxt_set_cup();
1772 /* Fall through. */
1773 case ARM_IWMMXT_wCSSF:
1774 tmp = iwmmxt_load_creg(wrd);
1775 tmp2 = load_reg(s, rd);
1776 tcg_gen_andc_i32(tmp, tmp, tmp2);
1777 tcg_temp_free_i32(tmp2);
1778 iwmmxt_store_creg(wrd, tmp);
1779 break;
1780 case ARM_IWMMXT_wCGR0:
1781 case ARM_IWMMXT_wCGR1:
1782 case ARM_IWMMXT_wCGR2:
1783 case ARM_IWMMXT_wCGR3:
1784 gen_op_iwmmxt_set_cup();
1785 tmp = load_reg(s, rd);
1786 iwmmxt_store_creg(wrd, tmp);
1787 break;
1788 default:
1789 return 1;
1790 }
1791 break;
1792 case 0x100: /* WXOR */
1793 wrd = (insn >> 12) & 0xf;
1794 rd0 = (insn >> 0) & 0xf;
1795 rd1 = (insn >> 16) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0);
1797 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1798 gen_op_iwmmxt_setpsr_nz();
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1802 break;
1803 case 0x111: /* TMRC */
1804 if (insn & 0xf)
1805 return 1;
1806 rd = (insn >> 12) & 0xf;
1807 wrd = (insn >> 16) & 0xf;
1808 tmp = iwmmxt_load_creg(wrd);
1809 store_reg(s, rd, tmp);
1810 break;
1811 case 0x300: /* WANDN */
1812 wrd = (insn >> 12) & 0xf;
1813 rd0 = (insn >> 0) & 0xf;
1814 rd1 = (insn >> 16) & 0xf;
1815 gen_op_iwmmxt_movq_M0_wRn(rd0);
1816 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1817 gen_op_iwmmxt_andq_M0_wRn(rd1);
1818 gen_op_iwmmxt_setpsr_nz();
1819 gen_op_iwmmxt_movq_wRn_M0(wrd);
1820 gen_op_iwmmxt_set_mup();
1821 gen_op_iwmmxt_set_cup();
1822 break;
1823 case 0x200: /* WAND */
1824 wrd = (insn >> 12) & 0xf;
1825 rd0 = (insn >> 0) & 0xf;
1826 rd1 = (insn >> 16) & 0xf;
1827 gen_op_iwmmxt_movq_M0_wRn(rd0);
1828 gen_op_iwmmxt_andq_M0_wRn(rd1);
1829 gen_op_iwmmxt_setpsr_nz();
1830 gen_op_iwmmxt_movq_wRn_M0(wrd);
1831 gen_op_iwmmxt_set_mup();
1832 gen_op_iwmmxt_set_cup();
1833 break;
1834 case 0x810: case 0xa10: /* WMADD */
1835 wrd = (insn >> 12) & 0xf;
1836 rd0 = (insn >> 0) & 0xf;
1837 rd1 = (insn >> 16) & 0xf;
1838 gen_op_iwmmxt_movq_M0_wRn(rd0);
1839 if (insn & (1 << 21))
1840 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1841 else
1842 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 break;
1846 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1847 wrd = (insn >> 12) & 0xf;
1848 rd0 = (insn >> 16) & 0xf;
1849 rd1 = (insn >> 0) & 0xf;
1850 gen_op_iwmmxt_movq_M0_wRn(rd0);
1851 switch ((insn >> 22) & 3) {
1852 case 0:
1853 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1854 break;
1855 case 1:
1856 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1857 break;
1858 case 2:
1859 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1860 break;
1861 case 3:
1862 return 1;
1863 }
1864 gen_op_iwmmxt_movq_wRn_M0(wrd);
1865 gen_op_iwmmxt_set_mup();
1866 gen_op_iwmmxt_set_cup();
1867 break;
1868 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1869 wrd = (insn >> 12) & 0xf;
1870 rd0 = (insn >> 16) & 0xf;
1871 rd1 = (insn >> 0) & 0xf;
1872 gen_op_iwmmxt_movq_M0_wRn(rd0);
1873 switch ((insn >> 22) & 3) {
1874 case 0:
1875 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1876 break;
1877 case 1:
1878 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1879 break;
1880 case 2:
1881 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1882 break;
1883 case 3:
1884 return 1;
1885 }
1886 gen_op_iwmmxt_movq_wRn_M0(wrd);
1887 gen_op_iwmmxt_set_mup();
1888 gen_op_iwmmxt_set_cup();
1889 break;
1890 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1891 wrd = (insn >> 12) & 0xf;
1892 rd0 = (insn >> 16) & 0xf;
1893 rd1 = (insn >> 0) & 0xf;
1894 gen_op_iwmmxt_movq_M0_wRn(rd0);
1895 if (insn & (1 << 22))
1896 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1897 else
1898 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1899 if (!(insn & (1 << 20)))
1900 gen_op_iwmmxt_addl_M0_wRn(wrd);
1901 gen_op_iwmmxt_movq_wRn_M0(wrd);
1902 gen_op_iwmmxt_set_mup();
1903 break;
1904 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1905 wrd = (insn >> 12) & 0xf;
1906 rd0 = (insn >> 16) & 0xf;
1907 rd1 = (insn >> 0) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 if (insn & (1 << 21)) {
1910 if (insn & (1 << 20))
1911 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1912 else
1913 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1914 } else {
1915 if (insn & (1 << 20))
1916 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1917 else
1918 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1919 }
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 break;
1923 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1924 wrd = (insn >> 12) & 0xf;
1925 rd0 = (insn >> 16) & 0xf;
1926 rd1 = (insn >> 0) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0);
1928 if (insn & (1 << 21))
1929 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1930 else
1931 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1932 if (!(insn & (1 << 20))) {
1933 iwmmxt_load_reg(cpu_V1, wrd);
1934 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1935 }
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 break;
1939 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 rd1 = (insn >> 0) & 0xf;
1943 gen_op_iwmmxt_movq_M0_wRn(rd0);
1944 switch ((insn >> 22) & 3) {
1945 case 0:
1946 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1947 break;
1948 case 1:
1949 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1950 break;
1951 case 2:
1952 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1953 break;
1954 case 3:
1955 return 1;
1956 }
1957 gen_op_iwmmxt_movq_wRn_M0(wrd);
1958 gen_op_iwmmxt_set_mup();
1959 gen_op_iwmmxt_set_cup();
1960 break;
1961 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1962 wrd = (insn >> 12) & 0xf;
1963 rd0 = (insn >> 16) & 0xf;
1964 rd1 = (insn >> 0) & 0xf;
1965 gen_op_iwmmxt_movq_M0_wRn(rd0);
1966 if (insn & (1 << 22)) {
1967 if (insn & (1 << 20))
1968 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1969 else
1970 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1971 } else {
1972 if (insn & (1 << 20))
1973 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1974 else
1975 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1976 }
1977 gen_op_iwmmxt_movq_wRn_M0(wrd);
1978 gen_op_iwmmxt_set_mup();
1979 gen_op_iwmmxt_set_cup();
1980 break;
1981 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1982 wrd = (insn >> 12) & 0xf;
1983 rd0 = (insn >> 16) & 0xf;
1984 rd1 = (insn >> 0) & 0xf;
1985 gen_op_iwmmxt_movq_M0_wRn(rd0);
1986 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1987 tcg_gen_andi_i32(tmp, tmp, 7);
1988 iwmmxt_load_reg(cpu_V1, rd1);
1989 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1990 tcg_temp_free_i32(tmp);
1991 gen_op_iwmmxt_movq_wRn_M0(wrd);
1992 gen_op_iwmmxt_set_mup();
1993 break;
1994 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1995 if (((insn >> 6) & 3) == 3)
1996 return 1;
1997 rd = (insn >> 12) & 0xf;
1998 wrd = (insn >> 16) & 0xf;
1999 tmp = load_reg(s, rd);
2000 gen_op_iwmmxt_movq_M0_wRn(wrd);
2001 switch ((insn >> 6) & 3) {
2002 case 0:
2003 tmp2 = tcg_const_i32(0xff);
2004 tmp3 = tcg_const_i32((insn & 7) << 3);
2005 break;
2006 case 1:
2007 tmp2 = tcg_const_i32(0xffff);
2008 tmp3 = tcg_const_i32((insn & 3) << 4);
2009 break;
2010 case 2:
2011 tmp2 = tcg_const_i32(0xffffffff);
2012 tmp3 = tcg_const_i32((insn & 1) << 5);
2013 break;
2014 default:
2015 TCGV_UNUSED_I32(tmp2);
2016 TCGV_UNUSED_I32(tmp3);
2017 }
2018 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2019 tcg_temp_free_i32(tmp3);
2020 tcg_temp_free_i32(tmp2);
2021 tcg_temp_free_i32(tmp);
2022 gen_op_iwmmxt_movq_wRn_M0(wrd);
2023 gen_op_iwmmxt_set_mup();
2024 break;
2025 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2026 rd = (insn >> 12) & 0xf;
2027 wrd = (insn >> 16) & 0xf;
2028 if (rd == 15 || ((insn >> 22) & 3) == 3)
2029 return 1;
2030 gen_op_iwmmxt_movq_M0_wRn(wrd);
2031 tmp = tcg_temp_new_i32();
2032 switch ((insn >> 22) & 3) {
2033 case 0:
2034 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2035 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2036 if (insn & 8) {
2037 tcg_gen_ext8s_i32(tmp, tmp);
2038 } else {
2039 tcg_gen_andi_i32(tmp, tmp, 0xff);
2040 }
2041 break;
2042 case 1:
2043 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2044 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2045 if (insn & 8) {
2046 tcg_gen_ext16s_i32(tmp, tmp);
2047 } else {
2048 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2049 }
2050 break;
2051 case 2:
2052 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2053 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2054 break;
2055 }
2056 store_reg(s, rd, tmp);
2057 break;
2058 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2059 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2060 return 1;
2061 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2062 switch ((insn >> 22) & 3) {
2063 case 0:
2064 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2065 break;
2066 case 1:
2067 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2068 break;
2069 case 2:
2070 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2071 break;
2072 }
2073 tcg_gen_shli_i32(tmp, tmp, 28);
2074 gen_set_nzcv(tmp);
2075 tcg_temp_free_i32(tmp);
2076 break;
2077 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2078 if (((insn >> 6) & 3) == 3)
2079 return 1;
2080 rd = (insn >> 12) & 0xf;
2081 wrd = (insn >> 16) & 0xf;
2082 tmp = load_reg(s, rd);
2083 switch ((insn >> 6) & 3) {
2084 case 0:
2085 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2086 break;
2087 case 1:
2088 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2089 break;
2090 case 2:
2091 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2092 break;
2093 }
2094 tcg_temp_free_i32(tmp);
2095 gen_op_iwmmxt_movq_wRn_M0(wrd);
2096 gen_op_iwmmxt_set_mup();
2097 break;
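            /* TANDC (and TORC below) fold the SIMD flag fields held in wCASF
               together, shifting by the per-lane field width (4, 8 or 16 bits)
               and AND-ing, so the top bits hold the combined all-lanes result
               before gen_set_nzcv() copies them into the ARM NZCV flags. */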
2098 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2099 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2100 return 1;
2101 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2102 tmp2 = tcg_temp_new_i32();
2103 tcg_gen_mov_i32(tmp2, tmp);
2104 switch ((insn >> 22) & 3) {
2105 case 0:
2106 for (i = 0; i < 7; i ++) {
2107 tcg_gen_shli_i32(tmp2, tmp2, 4);
2108 tcg_gen_and_i32(tmp, tmp, tmp2);
2109 }
2110 break;
2111 case 1:
2112 for (i = 0; i < 3; i ++) {
2113 tcg_gen_shli_i32(tmp2, tmp2, 8);
2114 tcg_gen_and_i32(tmp, tmp, tmp2);
2115 }
2116 break;
2117 case 2:
2118 tcg_gen_shli_i32(tmp2, tmp2, 16);
2119 tcg_gen_and_i32(tmp, tmp, tmp2);
2120 break;
2121 }
2122 gen_set_nzcv(tmp);
2123 tcg_temp_free_i32(tmp2);
2124 tcg_temp_free_i32(tmp);
2125 break;
2126 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2127 wrd = (insn >> 12) & 0xf;
2128 rd0 = (insn >> 16) & 0xf;
2129 gen_op_iwmmxt_movq_M0_wRn(rd0);
2130 switch ((insn >> 22) & 3) {
2131 case 0:
2132 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2133 break;
2134 case 1:
2135 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2136 break;
2137 case 2:
2138 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2139 break;
2140 case 3:
2141 return 1;
2142 }
2143 gen_op_iwmmxt_movq_wRn_M0(wrd);
2144 gen_op_iwmmxt_set_mup();
2145 break;
2146 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2147 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2148 return 1;
2149 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2150 tmp2 = tcg_temp_new_i32();
2151 tcg_gen_mov_i32(tmp2, tmp);
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 for (i = 0; i < 7; i ++) {
2155 tcg_gen_shli_i32(tmp2, tmp2, 4);
2156 tcg_gen_or_i32(tmp, tmp, tmp2);
2157 }
2158 break;
2159 case 1:
2160 for (i = 0; i < 3; i ++) {
2161 tcg_gen_shli_i32(tmp2, tmp2, 8);
2162 tcg_gen_or_i32(tmp, tmp, tmp2);
2163 }
2164 break;
2165 case 2:
2166 tcg_gen_shli_i32(tmp2, tmp2, 16);
2167 tcg_gen_or_i32(tmp, tmp, tmp2);
2168 break;
2169 }
2170 gen_set_nzcv(tmp);
2171 tcg_temp_free_i32(tmp2);
2172 tcg_temp_free_i32(tmp);
2173 break;
2174 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2175 rd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2178 return 1;
2179 gen_op_iwmmxt_movq_M0_wRn(rd0);
2180 tmp = tcg_temp_new_i32();
2181 switch ((insn >> 22) & 3) {
2182 case 0:
2183 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2184 break;
2185 case 1:
2186 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2187 break;
2188 case 2:
2189 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2190 break;
2191 }
2192 store_reg(s, rd, tmp);
2193 break;
2194 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2195 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2196 wrd = (insn >> 12) & 0xf;
2197 rd0 = (insn >> 16) & 0xf;
2198 rd1 = (insn >> 0) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0);
2200 switch ((insn >> 22) & 3) {
2201 case 0:
2202 if (insn & (1 << 21))
2203 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2204 else
2205 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2206 break;
2207 case 1:
2208 if (insn & (1 << 21))
2209 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2210 else
2211 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2212 break;
2213 case 2:
2214 if (insn & (1 << 21))
2215 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2216 else
2217 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2218 break;
2219 case 3:
2220 return 1;
2221 }
2222 gen_op_iwmmxt_movq_wRn_M0(wrd);
2223 gen_op_iwmmxt_set_mup();
2224 gen_op_iwmmxt_set_cup();
2225 break;
2226 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2227 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 22) & 3) {
2232 case 0:
2233 if (insn & (1 << 21))
2234 gen_op_iwmmxt_unpacklsb_M0();
2235 else
2236 gen_op_iwmmxt_unpacklub_M0();
2237 break;
2238 case 1:
2239 if (insn & (1 << 21))
2240 gen_op_iwmmxt_unpacklsw_M0();
2241 else
2242 gen_op_iwmmxt_unpackluw_M0();
2243 break;
2244 case 2:
2245 if (insn & (1 << 21))
2246 gen_op_iwmmxt_unpacklsl_M0();
2247 else
2248 gen_op_iwmmxt_unpacklul_M0();
2249 break;
2250 case 3:
2251 return 1;
2252 }
2253 gen_op_iwmmxt_movq_wRn_M0(wrd);
2254 gen_op_iwmmxt_set_mup();
2255 gen_op_iwmmxt_set_cup();
2256 break;
2257 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2258 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 gen_op_iwmmxt_movq_M0_wRn(rd0);
2262 switch ((insn >> 22) & 3) {
2263 case 0:
2264 if (insn & (1 << 21))
2265 gen_op_iwmmxt_unpackhsb_M0();
2266 else
2267 gen_op_iwmmxt_unpackhub_M0();
2268 break;
2269 case 1:
2270 if (insn & (1 << 21))
2271 gen_op_iwmmxt_unpackhsw_M0();
2272 else
2273 gen_op_iwmmxt_unpackhuw_M0();
2274 break;
2275 case 2:
2276 if (insn & (1 << 21))
2277 gen_op_iwmmxt_unpackhsl_M0();
2278 else
2279 gen_op_iwmmxt_unpackhul_M0();
2280 break;
2281 case 3:
2282 return 1;
2283 }
2284 gen_op_iwmmxt_movq_wRn_M0(wrd);
2285 gen_op_iwmmxt_set_mup();
2286 gen_op_iwmmxt_set_cup();
2287 break;
2288 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2289 case 0x214: case 0x614: case 0xa14: case 0xe14:
2290 if (((insn >> 22) & 3) == 0)
2291 return 1;
2292 wrd = (insn >> 12) & 0xf;
2293 rd0 = (insn >> 16) & 0xf;
2294 gen_op_iwmmxt_movq_M0_wRn(rd0);
2295 tmp = tcg_temp_new_i32();
2296 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2297 tcg_temp_free_i32(tmp);
2298 return 1;
2299 }
2300 switch ((insn >> 22) & 3) {
2301 case 1:
2302 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2303 break;
2304 case 2:
2305 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2306 break;
2307 case 3:
2308 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2309 break;
2310 }
2311 tcg_temp_free_i32(tmp);
2312 gen_op_iwmmxt_movq_wRn_M0(wrd);
2313 gen_op_iwmmxt_set_mup();
2314 gen_op_iwmmxt_set_cup();
2315 break;
2316 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2317 case 0x014: case 0x414: case 0x814: case 0xc14:
2318 if (((insn >> 22) & 3) == 0)
2319 return 1;
2320 wrd = (insn >> 12) & 0xf;
2321 rd0 = (insn >> 16) & 0xf;
2322 gen_op_iwmmxt_movq_M0_wRn(rd0);
2323 tmp = tcg_temp_new_i32();
2324 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2325 tcg_temp_free_i32(tmp);
2326 return 1;
2327 }
2328 switch ((insn >> 22) & 3) {
2329 case 1:
2330 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2331 break;
2332 case 2:
2333 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2334 break;
2335 case 3:
2336 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2337 break;
2338 }
2339 tcg_temp_free_i32(tmp);
2340 gen_op_iwmmxt_movq_wRn_M0(wrd);
2341 gen_op_iwmmxt_set_mup();
2342 gen_op_iwmmxt_set_cup();
2343 break;
2344 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2345 case 0x114: case 0x514: case 0x914: case 0xd14:
2346 if (((insn >> 22) & 3) == 0)
2347 return 1;
2348 wrd = (insn >> 12) & 0xf;
2349 rd0 = (insn >> 16) & 0xf;
2350 gen_op_iwmmxt_movq_M0_wRn(rd0);
2351 tmp = tcg_temp_new_i32();
2352 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2353 tcg_temp_free_i32(tmp);
2354 return 1;
2355 }
2356 switch ((insn >> 22) & 3) {
2357 case 1:
2358 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2359 break;
2360 case 2:
2361 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2362 break;
2363 case 3:
2364 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2365 break;
2366 }
2367 tcg_temp_free_i32(tmp);
2368 gen_op_iwmmxt_movq_wRn_M0(wrd);
2369 gen_op_iwmmxt_set_mup();
2370 gen_op_iwmmxt_set_cup();
2371 break;
2372 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2373 case 0x314: case 0x714: case 0xb14: case 0xf14:
2374 if (((insn >> 22) & 3) == 0)
2375 return 1;
2376 wrd = (insn >> 12) & 0xf;
2377 rd0 = (insn >> 16) & 0xf;
2378 gen_op_iwmmxt_movq_M0_wRn(rd0);
2379 tmp = tcg_temp_new_i32();
2380 switch ((insn >> 22) & 3) {
2381 case 1:
2382 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2383 tcg_temp_free_i32(tmp);
2384 return 1;
2385 }
2386 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2387 break;
2388 case 2:
2389 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2390 tcg_temp_free_i32(tmp);
2391 return 1;
2392 }
2393 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2394 break;
2395 case 3:
2396 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2397 tcg_temp_free_i32(tmp);
2398 return 1;
2399 }
2400 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2401 break;
2402 }
2403 tcg_temp_free_i32(tmp);
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2407 break;
2408 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2409 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2410 wrd = (insn >> 12) & 0xf;
2411 rd0 = (insn >> 16) & 0xf;
2412 rd1 = (insn >> 0) & 0xf;
2413 gen_op_iwmmxt_movq_M0_wRn(rd0);
2414 switch ((insn >> 22) & 3) {
2415 case 0:
2416 if (insn & (1 << 21))
2417 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2418 else
2419 gen_op_iwmmxt_minub_M0_wRn(rd1);
2420 break;
2421 case 1:
2422 if (insn & (1 << 21))
2423 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2424 else
2425 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2426 break;
2427 case 2:
2428 if (insn & (1 << 21))
2429 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2430 else
2431 gen_op_iwmmxt_minul_M0_wRn(rd1);
2432 break;
2433 case 3:
2434 return 1;
2435 }
2436 gen_op_iwmmxt_movq_wRn_M0(wrd);
2437 gen_op_iwmmxt_set_mup();
2438 break;
2439 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2440 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2441 wrd = (insn >> 12) & 0xf;
2442 rd0 = (insn >> 16) & 0xf;
2443 rd1 = (insn >> 0) & 0xf;
2444 gen_op_iwmmxt_movq_M0_wRn(rd0);
2445 switch ((insn >> 22) & 3) {
2446 case 0:
2447 if (insn & (1 << 21))
2448 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2449 else
2450 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2451 break;
2452 case 1:
2453 if (insn & (1 << 21))
2454 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2455 else
2456 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2457 break;
2458 case 2:
2459 if (insn & (1 << 21))
2460 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2461 else
2462 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2463 break;
2464 case 3:
2465 return 1;
2466 }
2467 gen_op_iwmmxt_movq_wRn_M0(wrd);
2468 gen_op_iwmmxt_set_mup();
2469 break;
2470 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2471 case 0x402: case 0x502: case 0x602: case 0x702:
2472 wrd = (insn >> 12) & 0xf;
2473 rd0 = (insn >> 16) & 0xf;
2474 rd1 = (insn >> 0) & 0xf;
2475 gen_op_iwmmxt_movq_M0_wRn(rd0);
2476 tmp = tcg_const_i32((insn >> 20) & 3);
2477 iwmmxt_load_reg(cpu_V1, rd1);
2478 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2479 tcg_temp_free_i32(tmp);
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 break;
2483 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2484 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2485 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2486 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2487 wrd = (insn >> 12) & 0xf;
2488 rd0 = (insn >> 16) & 0xf;
2489 rd1 = (insn >> 0) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
2491 switch ((insn >> 20) & 0xf) {
2492 case 0x0:
2493 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2494 break;
2495 case 0x1:
2496 gen_op_iwmmxt_subub_M0_wRn(rd1);
2497 break;
2498 case 0x3:
2499 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2500 break;
2501 case 0x4:
2502 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2503 break;
2504 case 0x5:
2505 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2506 break;
2507 case 0x7:
2508 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2509 break;
2510 case 0x8:
2511 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2512 break;
2513 case 0x9:
2514 gen_op_iwmmxt_subul_M0_wRn(rd1);
2515 break;
2516 case 0xb:
2517 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2518 break;
2519 default:
2520 return 1;
2521 }
2522 gen_op_iwmmxt_movq_wRn_M0(wrd);
2523 gen_op_iwmmxt_set_mup();
2524 gen_op_iwmmxt_set_cup();
2525 break;
2526 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2527 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2528 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2529 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2530 wrd = (insn >> 12) & 0xf;
2531 rd0 = (insn >> 16) & 0xf;
2532 gen_op_iwmmxt_movq_M0_wRn(rd0);
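            /* WSHUFH's 8-bit shuffle immediate is split across the encoding:
               insn[23:20] supply the high nibble and insn[3:0] the low nibble. */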
2533 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2534 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2535 tcg_temp_free_i32(tmp);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2541 case 0x418: case 0x518: case 0x618: case 0x718:
2542 case 0x818: case 0x918: case 0xa18: case 0xb18:
2543 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2544 wrd = (insn >> 12) & 0xf;
2545 rd0 = (insn >> 16) & 0xf;
2546 rd1 = (insn >> 0) & 0xf;
2547 gen_op_iwmmxt_movq_M0_wRn(rd0);
2548 switch ((insn >> 20) & 0xf) {
2549 case 0x0:
2550 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2551 break;
2552 case 0x1:
2553 gen_op_iwmmxt_addub_M0_wRn(rd1);
2554 break;
2555 case 0x3:
2556 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2557 break;
2558 case 0x4:
2559 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2560 break;
2561 case 0x5:
2562 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2563 break;
2564 case 0x7:
2565 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2566 break;
2567 case 0x8:
2568 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2569 break;
2570 case 0x9:
2571 gen_op_iwmmxt_addul_M0_wRn(rd1);
2572 break;
2573 case 0xb:
2574 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2575 break;
2576 default:
2577 return 1;
2578 }
2579 gen_op_iwmmxt_movq_wRn_M0(wrd);
2580 gen_op_iwmmxt_set_mup();
2581 gen_op_iwmmxt_set_cup();
2582 break;
2583 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2584 case 0x408: case 0x508: case 0x608: case 0x708:
2585 case 0x808: case 0x908: case 0xa08: case 0xb08:
2586 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2587 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2588 return 1;
2589 wrd = (insn >> 12) & 0xf;
2590 rd0 = (insn >> 16) & 0xf;
2591 rd1 = (insn >> 0) & 0xf;
2592 gen_op_iwmmxt_movq_M0_wRn(rd0);
2593 switch ((insn >> 22) & 3) {
2594 case 1:
2595 if (insn & (1 << 21))
2596 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2597 else
2598 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2599 break;
2600 case 2:
2601 if (insn & (1 << 21))
2602 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2603 else
2604 gen_op_iwmmxt_packul_M0_wRn(rd1);
2605 break;
2606 case 3:
2607 if (insn & (1 << 21))
2608 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2609 else
2610 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2611 break;
2612 }
2613 gen_op_iwmmxt_movq_wRn_M0(wrd);
2614 gen_op_iwmmxt_set_mup();
2615 gen_op_iwmmxt_set_cup();
2616 break;
2617 case 0x201: case 0x203: case 0x205: case 0x207:
2618 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2619 case 0x211: case 0x213: case 0x215: case 0x217:
2620 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2621 wrd = (insn >> 5) & 0xf;
2622 rd0 = (insn >> 12) & 0xf;
2623 rd1 = (insn >> 0) & 0xf;
2624 if (rd0 == 0xf || rd1 == 0xf)
2625 return 1;
2626 gen_op_iwmmxt_movq_M0_wRn(wrd);
2627 tmp = load_reg(s, rd0);
2628 tmp2 = load_reg(s, rd1);
2629 switch ((insn >> 16) & 0xf) {
2630 case 0x0: /* TMIA */
2631 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2632 break;
2633 case 0x8: /* TMIAPH */
2634 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2635 break;
2636 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2637 if (insn & (1 << 16))
2638 tcg_gen_shri_i32(tmp, tmp, 16);
2639 if (insn & (1 << 17))
2640 tcg_gen_shri_i32(tmp2, tmp2, 16);
2641 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2642 break;
2643 default:
2644 tcg_temp_free_i32(tmp2);
2645 tcg_temp_free_i32(tmp);
2646 return 1;
2647 }
2648 tcg_temp_free_i32(tmp2);
2649 tcg_temp_free_i32(tmp);
2650 gen_op_iwmmxt_movq_wRn_M0(wrd);
2651 gen_op_iwmmxt_set_mup();
2652 break;
2653 default:
2654 return 1;
2655 }
2656
2657 return 0;
2658 }
2659
2660 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2661 (i.e. an undefined instruction). */
2662 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2663 {
2664 int acc, rd0, rd1, rdhi, rdlo;
2665 TCGv_i32 tmp, tmp2;
2666
2667 if ((insn & 0x0ff00f10) == 0x0e200010) {
2668 /* Multiply with Internal Accumulate Format */
2669 rd0 = (insn >> 12) & 0xf;
2670 rd1 = insn & 0xf;
2671 acc = (insn >> 5) & 7;
2672
2673 if (acc != 0)
2674 return 1;
2675
2676 tmp = load_reg(s, rd0);
2677 tmp2 = load_reg(s, rd1);
2678 switch ((insn >> 16) & 0xf) {
2679 case 0x0: /* MIA */
2680 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2681 break;
2682 case 0x8: /* MIAPH */
2683 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2684 break;
2685 case 0xc: /* MIABB */
2686 case 0xd: /* MIABT */
2687 case 0xe: /* MIATB */
2688 case 0xf: /* MIATT */
2689 if (insn & (1 << 16))
2690 tcg_gen_shri_i32(tmp, tmp, 16);
2691 if (insn & (1 << 17))
2692 tcg_gen_shri_i32(tmp2, tmp2, 16);
2693 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2694 break;
2695 default:
2696 return 1;
2697 }
2698 tcg_temp_free_i32(tmp2);
2699 tcg_temp_free_i32(tmp);
2700
2701 gen_op_iwmmxt_movq_wRn_M0(acc);
2702 return 0;
2703 }
2704
2705 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2706 /* Internal Accumulator Access Format */
2707 rdhi = (insn >> 16) & 0xf;
2708 rdlo = (insn >> 12) & 0xf;
2709 acc = insn & 7;
2710
2711 if (acc != 0)
2712 return 1;
2713
2714 if (insn & ARM_CP_RW_BIT) { /* MRA */
2715 iwmmxt_load_reg(cpu_V0, acc);
2716 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2717 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2718 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
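        /* acc0 is 40 bits wide (hence the (40 - 32) in the mask below), so
           only the low 8 bits of the upper word read back as significant. */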
2719 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2720 } else { /* MAR */
2721 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2722 iwmmxt_store_reg(cpu_V0, acc);
2723 }
2724 return 0;
2725 }
2726
2727 return 1;
2728 }
2729
2730 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2731 #define VFP_SREG(insn, bigbit, smallbit) \
2732 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2733 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2734 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2735 reg = (((insn) >> (bigbit)) & 0x0f) \
2736 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2737 } else { \
2738 if (insn & (1 << (smallbit))) \
2739 return 1; \
2740 reg = ((insn) >> (bigbit)) & 0x0f; \
2741 }} while (0)
2742
2743 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2744 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2745 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2746 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2747 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2748 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
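/* Worked example of the field extraction above (illustrative only): for the
 * Vd field, VFP_SREG_D(insn) expands to ((insn >> 11) & 0x1e) | ((insn >> 22) & 1),
 * so insn[15:12] forms bits [4:1] of the S-register number and the D bit
 * (insn[22]) is its least significant bit.  VFP_DREG_D() instead places the
 * D bit at bit [4] of the register number, which is how VFP3 cores reach
 * D16..D31; on pre-VFP3 cores a set D bit makes the insn UNDEF (return 1).
 */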
2749
2750 /* Move between integer and VFP cores. */
2751 static TCGv_i32 gen_vfp_mrs(void)
2752 {
2753 TCGv_i32 tmp = tcg_temp_new_i32();
2754 tcg_gen_mov_i32(tmp, cpu_F0s);
2755 return tmp;
2756 }
2757
2758 static void gen_vfp_msr(TCGv_i32 tmp)
2759 {
2760 tcg_gen_mov_i32(cpu_F0s, tmp);
2761 tcg_temp_free_i32(tmp);
2762 }
2763
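/* Duplicate the byte selected by "shift" across all four byte lanes of a
   32-bit value, e.g. 0x000000ab becomes 0xabababab. */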
2764 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2765 {
2766 TCGv_i32 tmp = tcg_temp_new_i32();
2767 if (shift)
2768 tcg_gen_shri_i32(var, var, shift);
2769 tcg_gen_ext8u_i32(var, var);
2770 tcg_gen_shli_i32(tmp, var, 8);
2771 tcg_gen_or_i32(var, var, tmp);
2772 tcg_gen_shli_i32(tmp, var, 16);
2773 tcg_gen_or_i32(var, var, tmp);
2774 tcg_temp_free_i32(tmp);
2775 }
2776
2777 static void gen_neon_dup_low16(TCGv_i32 var)
2778 {
2779 TCGv_i32 tmp = tcg_temp_new_i32();
2780 tcg_gen_ext16u_i32(var, var);
2781 tcg_gen_shli_i32(tmp, var, 16);
2782 tcg_gen_or_i32(var, var, tmp);
2783 tcg_temp_free_i32(tmp);
2784 }
2785
2786 static void gen_neon_dup_high16(TCGv_i32 var)
2787 {
2788 TCGv_i32 tmp = tcg_temp_new_i32();
2789 tcg_gen_andi_i32(var, var, 0xffff0000);
2790 tcg_gen_shri_i32(tmp, var, 16);
2791 tcg_gen_or_i32(var, var, tmp);
2792 tcg_temp_free_i32(tmp);
2793 }
2794
2795 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2796 {
2797 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2798 TCGv_i32 tmp = tcg_temp_new_i32();
2799 switch (size) {
2800 case 0:
2801 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2802 gen_neon_dup_u8(tmp, 0);
2803 break;
2804 case 1:
2805 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2806 gen_neon_dup_low16(tmp);
2807 break;
2808 case 2:
2809 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2810 break;
2811 default: /* Avoid compiler warnings. */
2812 abort();
2813 }
2814 return tmp;
2815 }
2816
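/* v8 VSEL: copy frn to the destination when the condition encoded in
 * insn[21:20] (EQ, VS, GE or GT) holds, otherwise copy frm.  The condition is
 * evaluated with movcond on the cached ZF/NF/VF flag variables rather than by
 * generating branches.
 */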
2817 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2818 uint32_t dp)
2819 {
2820 uint32_t cc = extract32(insn, 20, 2);
2821
2822 if (dp) {
2823 TCGv_i64 frn, frm, dest;
2824 TCGv_i64 tmp, zero, zf, nf, vf;
2825
2826 zero = tcg_const_i64(0);
2827
2828 frn = tcg_temp_new_i64();
2829 frm = tcg_temp_new_i64();
2830 dest = tcg_temp_new_i64();
2831
2832 zf = tcg_temp_new_i64();
2833 nf = tcg_temp_new_i64();
2834 vf = tcg_temp_new_i64();
2835
2836 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2837 tcg_gen_ext_i32_i64(nf, cpu_NF);
2838 tcg_gen_ext_i32_i64(vf, cpu_VF);
2839
2840 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2841 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2842 switch (cc) {
2843 case 0: /* eq: Z */
2844 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2845 frn, frm);
2846 break;
2847 case 1: /* vs: V */
2848 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2849 frn, frm);
2850 break;
2851 case 2: /* ge: N == V -> N ^ V == 0 */
2852 tmp = tcg_temp_new_i64();
2853 tcg_gen_xor_i64(tmp, vf, nf);
2854 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2855 frn, frm);
2856 tcg_temp_free_i64(tmp);
2857 break;
2858 case 3: /* gt: !Z && N == V */
2859 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2860 frn, frm);
2861 tmp = tcg_temp_new_i64();
2862 tcg_gen_xor_i64(tmp, vf, nf);
2863 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2864 dest, frm);
2865 tcg_temp_free_i64(tmp);
2866 break;
2867 }
2868 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2869 tcg_temp_free_i64(frn);
2870 tcg_temp_free_i64(frm);
2871 tcg_temp_free_i64(dest);
2872
2873 tcg_temp_free_i64(zf);
2874 tcg_temp_free_i64(nf);
2875 tcg_temp_free_i64(vf);
2876
2877 tcg_temp_free_i64(zero);
2878 } else {
2879 TCGv_i32 frn, frm, dest;
2880 TCGv_i32 tmp, zero;
2881
2882 zero = tcg_const_i32(0);
2883
2884 frn = tcg_temp_new_i32();
2885 frm = tcg_temp_new_i32();
2886 dest = tcg_temp_new_i32();
2887 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2888 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2889 switch (cc) {
2890 case 0: /* eq: Z */
2891 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2892 frn, frm);
2893 break;
2894 case 1: /* vs: V */
2895 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2896 frn, frm);
2897 break;
2898 case 2: /* ge: N == V -> N ^ V == 0 */
2899 tmp = tcg_temp_new_i32();
2900 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2901 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2902 frn, frm);
2903 tcg_temp_free_i32(tmp);
2904 break;
2905 case 3: /* gt: !Z && N == V */
2906 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2907 frn, frm);
2908 tmp = tcg_temp_new_i32();
2909 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2910 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2911 dest, frm);
2912 tcg_temp_free_i32(tmp);
2913 break;
2914 }
2915 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2916 tcg_temp_free_i32(frn);
2917 tcg_temp_free_i32(frm);
2918 tcg_temp_free_i32(dest);
2919
2920 tcg_temp_free_i32(zero);
2921 }
2922
2923 return 0;
2924 }
2925
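/* v8 VMAXNM/VMINNM, selected by insn[6] (1 = minimum): the IEEE 754-2008
 * maxNum/minNum operations, which prefer a numeric operand over a quiet NaN,
 * as implemented by the minnum/maxnum helpers.
 */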
2926 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2927 uint32_t rm, uint32_t dp)
2928 {
2929 uint32_t vmin = extract32(insn, 6, 1);
2930 TCGv_ptr fpst = get_fpstatus_ptr(0);
2931
2932 if (dp) {
2933 TCGv_i64 frn, frm, dest;
2934
2935 frn = tcg_temp_new_i64();
2936 frm = tcg_temp_new_i64();
2937 dest = tcg_temp_new_i64();
2938
2939 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2940 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2941 if (vmin) {
2942 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2943 } else {
2944 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2945 }
2946 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2947 tcg_temp_free_i64(frn);
2948 tcg_temp_free_i64(frm);
2949 tcg_temp_free_i64(dest);
2950 } else {
2951 TCGv_i32 frn, frm, dest;
2952
2953 frn = tcg_temp_new_i32();
2954 frm = tcg_temp_new_i32();
2955 dest = tcg_temp_new_i32();
2956
2957 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2958 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2959 if (vmin) {
2960 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2961 } else {
2962 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2963 }
2964 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2965 tcg_temp_free_i32(frn);
2966 tcg_temp_free_i32(frm);
2967 tcg_temp_free_i32(dest);
2968 }
2969
2970 tcg_temp_free_ptr(fpst);
2971 return 0;
2972 }
2973
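/* v8 VRINTA/VRINTN/VRINTP/VRINTM: round to an integral value in floating-point
 * format using the explicitly encoded rounding mode rather than the one in
 * FPSCR.  The first set_rmode call installs the new mode and hands back the
 * previous one in tcg_rmode; the second call restores it afterwards.
 */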
2974 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2975 int rounding)
2976 {
2977 TCGv_ptr fpst = get_fpstatus_ptr(0);
2978 TCGv_i32 tcg_rmode;
2979
2980 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2981 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2982
2983 if (dp) {
2984 TCGv_i64 tcg_op;
2985 TCGv_i64 tcg_res;
2986 tcg_op = tcg_temp_new_i64();
2987 tcg_res = tcg_temp_new_i64();
2988 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2989 gen_helper_rintd(tcg_res, tcg_op, fpst);
2990 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2991 tcg_temp_free_i64(tcg_op);
2992 tcg_temp_free_i64(tcg_res);
2993 } else {
2994 TCGv_i32 tcg_op;
2995 TCGv_i32 tcg_res;
2996 tcg_op = tcg_temp_new_i32();
2997 tcg_res = tcg_temp_new_i32();
2998 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2999 gen_helper_rints(tcg_res, tcg_op, fpst);
3000 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3001 tcg_temp_free_i32(tcg_op);
3002 tcg_temp_free_i32(tcg_res);
3003 }
3004
3005 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3006 tcg_temp_free_i32(tcg_rmode);
3007
3008 tcg_temp_free_ptr(fpst);
3009 return 0;
3010 }
3011
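/* v8 VCVTA/VCVTN/VCVTP/VCVTM: convert floating-point to a 32-bit integer using
 * the rounding mode encoded in the instruction (not FPSCR).  insn[7] selects a
 * signed (1) or unsigned (0) result, and the same save/restore-rmode pattern
 * as handle_vrint() wraps the conversion helpers.
 */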
3012 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3013 int rounding)
3014 {
3015 bool is_signed = extract32(insn, 7, 1);
3016 TCGv_ptr fpst = get_fpstatus_ptr(0);
3017 TCGv_i32 tcg_rmode, tcg_shift;
3018
3019 tcg_shift = tcg_const_i32(0);
3020
3021 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3022 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3023
3024 if (dp) {
3025 TCGv_i64 tcg_double, tcg_res;
3026 TCGv_i32 tcg_tmp;
3027 /* Rd is encoded as a single precision register even when the source
3028 * is double precision.
3029 */
3030 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3031 tcg_double = tcg_temp_new_i64();
3032 tcg_res = tcg_temp_new_i64();
3033 tcg_tmp = tcg_temp_new_i32();
3034 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3035 if (is_signed) {
3036 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3037 } else {
3038 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3039 }
3040 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3041 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3042 tcg_temp_free_i32(tcg_tmp);
3043 tcg_temp_free_i64(tcg_res);
3044 tcg_temp_free_i64(tcg_double);
3045 } else {
3046 TCGv_i32 tcg_single, tcg_res;
3047 tcg_single = tcg_temp_new_i32();
3048 tcg_res = tcg_temp_new_i32();
3049 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3050 if (is_signed) {
3051 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3052 } else {
3053 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3054 }
3055 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3056 tcg_temp_free_i32(tcg_res);
3057 tcg_temp_free_i32(tcg_single);
3058 }
3059
3060 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3061 tcg_temp_free_i32(tcg_rmode);
3062
3063 tcg_temp_free_i32(tcg_shift);
3064
3065 tcg_temp_free_ptr(fpst);
3066
3067 return 0;
3068 }
3069
3070 /* Table for converting the most common AArch32 encoding of
3071 * rounding mode to arm_fprounding order (which matches the
3072 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3073 */
3074 static const uint8_t fp_decode_rm[] = {
3075 FPROUNDING_TIEAWAY,
3076 FPROUNDING_TIEEVEN,
3077 FPROUNDING_POSINF,
3078 FPROUNDING_NEGINF,
3079 };
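/* The callers below index this table with insn[17:16], so encodings map as
 * 00 -> tie-away ("A"), 01 -> tie-even ("N"), 10 -> +inf ("P"),
 * 11 -> -inf ("M"), matching the VRINT{A,N,P,M} and VCVT{A,N,P,M} suffixes.
 */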
3080
3081 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3082 {
3083 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3084
3085 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3086 return 1;
3087 }
3088
3089 if (dp) {
3090 VFP_DREG_D(rd, insn);
3091 VFP_DREG_N(rn, insn);
3092 VFP_DREG_M(rm, insn);
3093 } else {
3094 rd = VFP_SREG_D(insn);
3095 rn = VFP_SREG_N(insn);
3096 rm = VFP_SREG_M(insn);
3097 }
3098
3099 if ((insn & 0x0f800e50) == 0x0e000a00) {
3100 return handle_vsel(insn, rd, rn, rm, dp);
3101 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3102 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3103 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3104 /* VRINTA, VRINTN, VRINTP, VRINTM */
3105 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3106 return handle_vrint(insn, rd, rm, dp, rounding);
3107 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3108 /* VCVTA, VCVTN, VCVTP, VCVTM */
3109 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3110 return handle_vcvt(insn, rd, rm, dp, rounding);
3111 }
3112 return 1;
3113 }
3114
3115 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3116 (i.e. an undefined instruction). */
3117 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3118 {
3119 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3120 int dp, veclen;
3121 TCGv_i32 addr;
3122 TCGv_i32 tmp;
3123 TCGv_i32 tmp2;
3124
3125 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3126 return 1;
3127 }
3128
3129 /* FIXME: this access check should not take precedence over UNDEF
3130 * for invalid encodings; we will generate incorrect syndrome information
3131 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3132 */
3133 if (s->fp_excp_el) {
3134 gen_exception_insn(s, 4, EXCP_UDEF,
3135 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3136 return 0;
3137 }
3138
3139 if (!s->vfp_enabled) {
3140 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3141 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3142 return 1;
3143 rn = (insn >> 16) & 0xf;
3144 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3145 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3146 return 1;
3147 }
3148 }
3149
3150 if (extract32(insn, 28, 4) == 0xf) {
3151 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3152 * only used in v8 and above.
3153 */
3154 return disas_vfp_v8_insn(s, insn);
3155 }
3156
3157 dp = ((insn & 0xf00) == 0xb00);
3158 switch ((insn >> 24) & 0xf) {
3159 case 0xe:
3160 if (insn & (1 << 4)) {
3161 /* single register transfer */
3162 rd = (insn >> 12) & 0xf;
3163 if (dp) {
3164 int size;
3165 int pass;
3166
3167 VFP_DREG_N(rn, insn);
3168 if (insn & 0xf)
3169 return 1;