target-arm/translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "internals.h"
29 #include "disas/disas.h"
30 #include "tcg-op.h"
31 #include "qemu/log.h"
32 #include "qemu/bitops.h"
33 #include "arm_ldst.h"
34
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
37
38 #include "trace-tcg.h"
39
40
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J 0
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
51
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
53
54 #include "translate.h"
55 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
56
57 #if defined(CONFIG_USER_ONLY)
58 #define IS_USER(s) 1
59 #else
60 #define IS_USER(s) (s->user)
61 #endif
62
63 TCGv_ptr cpu_env;
64 /* We reuse the same 64-bit temporaries for efficiency. */
65 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
66 static TCGv_i32 cpu_R[16];
67 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
68 static TCGv_i64 cpu_exclusive_addr;
69 static TCGv_i64 cpu_exclusive_val;
70 #ifdef CONFIG_USER_ONLY
71 static TCGv_i64 cpu_exclusive_test;
72 static TCGv_i32 cpu_exclusive_info;
73 #endif
74
75 /* FIXME: These should be removed. */
76 static TCGv_i32 cpu_F0s, cpu_F1s;
77 static TCGv_i64 cpu_F0d, cpu_F1d;
78
79 #include "exec/gen-icount.h"
80
81 static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
84
85 /* initialize TCG globals. */
86 void arm_translate_init(void)
87 {
88 int i;
89
90 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
91
92 for (i = 0; i < 16; i++) {
93 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
94 offsetof(CPUARMState, regs[i]),
95 regnames[i]);
96 }
97 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
98 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
99 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
100 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
101
102 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
103 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
104 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
105 offsetof(CPUARMState, exclusive_val), "exclusive_val");
106 #ifdef CONFIG_USER_ONLY
107 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
108 offsetof(CPUARMState, exclusive_test), "exclusive_test");
109 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUARMState, exclusive_info), "exclusive_info");
111 #endif
112
113 a64_translate_init();
114 }
115
116 static inline TCGv_i32 load_cpu_offset(int offset)
117 {
118 TCGv_i32 tmp = tcg_temp_new_i32();
119 tcg_gen_ld_i32(tmp, cpu_env, offset);
120 return tmp;
121 }
122
123 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
124
125 static inline void store_cpu_offset(TCGv_i32 var, int offset)
126 {
127 tcg_gen_st_i32(var, cpu_env, offset);
128 tcg_temp_free_i32(var);
129 }
130
131 #define store_cpu_field(var, name) \
132 store_cpu_offset(var, offsetof(CPUARMState, name))
133
134 /* Set a variable to the value of a CPU register. */
135 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
136 {
137 if (reg == 15) {
138 uint32_t addr;
139 /* normally, since we updated PC, we need only add one more insn length */
140 if (s->thumb)
141 addr = (long)s->pc + 2;
142 else
143 addr = (long)s->pc + 4;
144 tcg_gen_movi_i32(var, addr);
145 } else {
146 tcg_gen_mov_i32(var, cpu_R[reg]);
147 }
148 }
149
150 /* Create a new temporary and set it to the value of a CPU register. */
151 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
152 {
153 TCGv_i32 tmp = tcg_temp_new_i32();
154 load_reg_var(s, tmp, reg);
155 return tmp;
156 }
157
158 /* Set a CPU register. The source must be a temporary and will be
159 marked as dead. */
160 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
161 {
162 if (reg == 15) {
163 tcg_gen_andi_i32(var, var, ~1);
164 s->is_jmp = DISAS_JUMP;
165 }
166 tcg_gen_mov_i32(cpu_R[reg], var);
167 tcg_temp_free_i32(var);
168 }
169
170 /* Value extensions. */
171 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
172 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
173 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
174 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
175
176 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
177 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
178
179
180 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
181 {
182 TCGv_i32 tmp_mask = tcg_const_i32(mask);
183 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
184 tcg_temp_free_i32(tmp_mask);
185 }
186 /* Set NZCV flags from the high 4 bits of var. */
187 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
188
189 static void gen_exception_internal(int excp)
190 {
191 TCGv_i32 tcg_excp = tcg_const_i32(excp);
192
193 assert(excp_is_internal(excp));
194 gen_helper_exception_internal(cpu_env, tcg_excp);
195 tcg_temp_free_i32(tcg_excp);
196 }
197
198 static void gen_exception(int excp, uint32_t syndrome)
199 {
200 TCGv_i32 tcg_excp = tcg_const_i32(excp);
201 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
202
203 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
204 tcg_temp_free_i32(tcg_syn);
205 tcg_temp_free_i32(tcg_excp);
206 }
207
208 static void gen_ss_advance(DisasContext *s)
209 {
210 /* If the singlestep state is Active-not-pending, advance to
211 * Active-pending.
212 */
213 if (s->ss_active) {
214 s->pstate_ss = 0;
215 gen_helper_clear_pstate_ss(cpu_env);
216 }
217 }
218
219 static void gen_step_complete_exception(DisasContext *s)
220 {
221 /* We just completed a step of an insn. Move from Active-not-pending
222 * to Active-pending, and then also take the swstep exception.
223 * This corresponds to making the (IMPDEF) choice to prioritize
224 * swstep exceptions over asynchronous exceptions taken to an exception
225 * level where debug is disabled. This choice has the advantage that
226 * we do not need to maintain internal state corresponding to the
227 * ISV/EX syndrome bits between completion of the step and generation
228 * of the exception, and our syndrome information is always correct.
229 */
230 gen_ss_advance(s);
231 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
232 s->is_jmp = DISAS_EXC;
233 }
234
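/* Signed dual 16x16->32 multiply: on return a = lo(a)*lo(b) and b = hi(a)*hi(b). */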
235 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
236 {
237 TCGv_i32 tmp1 = tcg_temp_new_i32();
238 TCGv_i32 tmp2 = tcg_temp_new_i32();
239 tcg_gen_ext16s_i32(tmp1, a);
240 tcg_gen_ext16s_i32(tmp2, b);
241 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
242 tcg_temp_free_i32(tmp2);
243 tcg_gen_sari_i32(a, a, 16);
244 tcg_gen_sari_i32(b, b, 16);
245 tcg_gen_mul_i32(b, b, a);
246 tcg_gen_mov_i32(a, tmp1);
247 tcg_temp_free_i32(tmp1);
248 }
249
250 /* Byteswap each halfword. */
251 static void gen_rev16(TCGv_i32 var)
252 {
253 TCGv_i32 tmp = tcg_temp_new_i32();
254 tcg_gen_shri_i32(tmp, var, 8);
255 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
256 tcg_gen_shli_i32(var, var, 8);
257 tcg_gen_andi_i32(var, var, 0xff00ff00);
258 tcg_gen_or_i32(var, var, tmp);
259 tcg_temp_free_i32(tmp);
260 }
261
262 /* Byteswap low halfword and sign extend. */
263 static void gen_revsh(TCGv_i32 var)
264 {
265 tcg_gen_ext16u_i32(var, var);
266 tcg_gen_bswap16_i32(var, var);
267 tcg_gen_ext16s_i32(var, var);
268 }
269
270 /* Unsigned bitfield extract. */
271 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
272 {
273 if (shift)
274 tcg_gen_shri_i32(var, var, shift);
275 tcg_gen_andi_i32(var, var, mask);
276 }
277
278 /* Signed bitfield extract. */
279 static void gen_sbfx(TCGv_i32 var, int shift, int width)
280 {
281 uint32_t signbit;
282
283 if (shift)
284 tcg_gen_sari_i32(var, var, shift);
285 if (shift + width < 32) {
286 signbit = 1u << (width - 1);
287 tcg_gen_andi_i32(var, var, (1u << width) - 1);
288 tcg_gen_xori_i32(var, var, signbit);
289 tcg_gen_subi_i32(var, var, signbit);
290 }
291 }
292
293 /* Return (b << 32) + a. Mark inputs as dead. */
294 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
295 {
296 TCGv_i64 tmp64 = tcg_temp_new_i64();
297
298 tcg_gen_extu_i32_i64(tmp64, b);
299 tcg_temp_free_i32(b);
300 tcg_gen_shli_i64(tmp64, tmp64, 32);
301 tcg_gen_add_i64(a, tmp64, a);
302
303 tcg_temp_free_i64(tmp64);
304 return a;
305 }
306
307 /* Return (b << 32) - a. Mark inputs as dead. */
308 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
309 {
310 TCGv_i64 tmp64 = tcg_temp_new_i64();
311
312 tcg_gen_extu_i32_i64(tmp64, b);
313 tcg_temp_free_i32(b);
314 tcg_gen_shli_i64(tmp64, tmp64, 32);
315 tcg_gen_sub_i64(a, tmp64, a);
316
317 tcg_temp_free_i64(tmp64);
318 return a;
319 }
320
321 /* 32x32->64 multiply. Marks inputs as dead. */
322 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
323 {
324 TCGv_i32 lo = tcg_temp_new_i32();
325 TCGv_i32 hi = tcg_temp_new_i32();
326 TCGv_i64 ret;
327
328 tcg_gen_mulu2_i32(lo, hi, a, b);
329 tcg_temp_free_i32(a);
330 tcg_temp_free_i32(b);
331
332 ret = tcg_temp_new_i64();
333 tcg_gen_concat_i32_i64(ret, lo, hi);
334 tcg_temp_free_i32(lo);
335 tcg_temp_free_i32(hi);
336
337 return ret;
338 }
339
340 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
341 {
342 TCGv_i32 lo = tcg_temp_new_i32();
343 TCGv_i32 hi = tcg_temp_new_i32();
344 TCGv_i64 ret;
345
346 tcg_gen_muls2_i32(lo, hi, a, b);
347 tcg_temp_free_i32(a);
348 tcg_temp_free_i32(b);
349
350 ret = tcg_temp_new_i64();
351 tcg_gen_concat_i32_i64(ret, lo, hi);
352 tcg_temp_free_i32(lo);
353 tcg_temp_free_i32(hi);
354
355 return ret;
356 }
357
358 /* Swap low and high halfwords. */
359 static void gen_swap_half(TCGv_i32 var)
360 {
361 TCGv_i32 tmp = tcg_temp_new_i32();
362 tcg_gen_shri_i32(tmp, var, 16);
363 tcg_gen_shli_i32(var, var, 16);
364 tcg_gen_or_i32(var, var, tmp);
365 tcg_temp_free_i32(tmp);
366 }
367
368 /* Dual 16-bit add. The result is placed in t0, and t1 is marked as dead.
369 tmp = (t0 ^ t1) & 0x8000;
370 t0 &= ~0x8000;
371 t1 &= ~0x8000;
372 t0 = (t0 + t1) ^ tmp;
373 */
374
375 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
376 {
377 TCGv_i32 tmp = tcg_temp_new_i32();
378 tcg_gen_xor_i32(tmp, t0, t1);
379 tcg_gen_andi_i32(tmp, tmp, 0x8000);
380 tcg_gen_andi_i32(t0, t0, ~0x8000);
381 tcg_gen_andi_i32(t1, t1, ~0x8000);
382 tcg_gen_add_i32(t0, t0, t1);
383 tcg_gen_xor_i32(t0, t0, tmp);
384 tcg_temp_free_i32(tmp);
385 tcg_temp_free_i32(t1);
386 }
387
388 /* Set CF to the top bit of var. */
389 static void gen_set_CF_bit31(TCGv_i32 var)
390 {
391 tcg_gen_shri_i32(cpu_CF, var, 31);
392 }
393
394 /* Set N and Z flags from var. */
395 static inline void gen_logic_CC(TCGv_i32 var)
396 {
397 tcg_gen_mov_i32(cpu_NF, var);
398 tcg_gen_mov_i32(cpu_ZF, var);
399 }
400
401 /* T0 += T1 + CF. */
402 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
403 {
404 tcg_gen_add_i32(t0, t0, t1);
405 tcg_gen_add_i32(t0, t0, cpu_CF);
406 }
407
408 /* dest = T0 + T1 + CF. */
409 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
410 {
411 tcg_gen_add_i32(dest, t0, t1);
412 tcg_gen_add_i32(dest, dest, cpu_CF);
413 }
414
415 /* dest = T0 - T1 + CF - 1. */
416 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
417 {
418 tcg_gen_sub_i32(dest, t0, t1);
419 tcg_gen_add_i32(dest, dest, cpu_CF);
420 tcg_gen_subi_i32(dest, dest, 1);
421 }
422
423 /* dest = T0 + T1. Compute C, N, V and Z flags */
424 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
425 {
426 TCGv_i32 tmp = tcg_temp_new_i32();
427 tcg_gen_movi_i32(tmp, 0);
428 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
429 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
430 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
431 tcg_gen_xor_i32(tmp, t0, t1);
432 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
433 tcg_temp_free_i32(tmp);
434 tcg_gen_mov_i32(dest, cpu_NF);
435 }
436
437 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
438 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
439 {
440 TCGv_i32 tmp = tcg_temp_new_i32();
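/* Sum t0 + t1 + CF with carry-out either via the backend's add2 op or,
* when it is missing, via a 64-bit addition. */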
441 if (TCG_TARGET_HAS_add2_i32) {
442 tcg_gen_movi_i32(tmp, 0);
443 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
444 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
445 } else {
446 TCGv_i64 q0 = tcg_temp_new_i64();
447 TCGv_i64 q1 = tcg_temp_new_i64();
448 tcg_gen_extu_i32_i64(q0, t0);
449 tcg_gen_extu_i32_i64(q1, t1);
450 tcg_gen_add_i64(q0, q0, q1);
451 tcg_gen_extu_i32_i64(q1, cpu_CF);
452 tcg_gen_add_i64(q0, q0, q1);
453 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
454 tcg_temp_free_i64(q0);
455 tcg_temp_free_i64(q1);
456 }
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
458 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
459 tcg_gen_xor_i32(tmp, t0, t1);
460 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
461 tcg_temp_free_i32(tmp);
462 tcg_gen_mov_i32(dest, cpu_NF);
463 }
464
465 /* dest = T0 - T1. Compute C, N, V and Z flags */
466 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
467 {
468 TCGv_i32 tmp;
469 tcg_gen_sub_i32(cpu_NF, t0, t1);
470 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
471 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
472 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
473 tmp = tcg_temp_new_i32();
474 tcg_gen_xor_i32(tmp, t0, t1);
475 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
476 tcg_temp_free_i32(tmp);
477 tcg_gen_mov_i32(dest, cpu_NF);
478 }
479
480 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
481 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
482 {
483 TCGv_i32 tmp = tcg_temp_new_i32();
484 tcg_gen_not_i32(tmp, t1);
485 gen_adc_CC(dest, t0, tmp);
486 tcg_temp_free_i32(tmp);
487 }
488
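/* Register-specified LSL/LSR: only the low byte of the shift count is used,
* and counts of 32 or more yield zero. */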
489 #define GEN_SHIFT(name) \
490 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
491 { \
492 TCGv_i32 tmp1, tmp2, tmp3; \
493 tmp1 = tcg_temp_new_i32(); \
494 tcg_gen_andi_i32(tmp1, t1, 0xff); \
495 tmp2 = tcg_const_i32(0); \
496 tmp3 = tcg_const_i32(0x1f); \
497 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
498 tcg_temp_free_i32(tmp3); \
499 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
500 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
501 tcg_temp_free_i32(tmp2); \
502 tcg_temp_free_i32(tmp1); \
503 }
504 GEN_SHIFT(shl)
505 GEN_SHIFT(shr)
506 #undef GEN_SHIFT
507
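/* Register-specified ASR: shift counts of 32 or more are clamped to 31,
* which produces the same all-sign-bits result. */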
508 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
509 {
510 TCGv_i32 tmp1, tmp2;
511 tmp1 = tcg_temp_new_i32();
512 tcg_gen_andi_i32(tmp1, t1, 0xff);
513 tmp2 = tcg_const_i32(0x1f);
514 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
515 tcg_temp_free_i32(tmp2);
516 tcg_gen_sar_i32(dest, t0, tmp1);
517 tcg_temp_free_i32(tmp1);
518 }
519
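/* dest = |src|, computed branchlessly with a conditional move. */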
520 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
521 {
522 TCGv_i32 c0 = tcg_const_i32(0);
523 TCGv_i32 tmp = tcg_temp_new_i32();
524 tcg_gen_neg_i32(tmp, src);
525 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
526 tcg_temp_free_i32(c0);
527 tcg_temp_free_i32(tmp);
528 }
529
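/* Set CF to bit 'shift' of var, i.e. the last bit shifted out by an
* immediate shift. */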
530 static void shifter_out_im(TCGv_i32 var, int shift)
531 {
532 if (shift == 0) {
533 tcg_gen_andi_i32(cpu_CF, var, 1);
534 } else {
535 tcg_gen_shri_i32(cpu_CF, var, shift);
536 if (shift != 31) {
537 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
538 }
539 }
540 }
541
542 /* Shift by immediate. Includes special handling for shift == 0. */
543 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
544 int shift, int flags)
545 {
546 switch (shiftop) {
547 case 0: /* LSL */
548 if (shift != 0) {
549 if (flags)
550 shifter_out_im(var, 32 - shift);
551 tcg_gen_shli_i32(var, var, shift);
552 }
553 break;
554 case 1: /* LSR */
555 if (shift == 0) {
556 if (flags) {
557 tcg_gen_shri_i32(cpu_CF, var, 31);
558 }
559 tcg_gen_movi_i32(var, 0);
560 } else {
561 if (flags)
562 shifter_out_im(var, shift - 1);
563 tcg_gen_shri_i32(var, var, shift);
564 }
565 break;
566 case 2: /* ASR */
567 if (shift == 0)
568 shift = 32;
569 if (flags)
570 shifter_out_im(var, shift - 1);
571 if (shift == 32)
572 shift = 31;
573 tcg_gen_sari_i32(var, var, shift);
574 break;
575 case 3: /* ROR/RRX */
576 if (shift != 0) {
577 if (flags)
578 shifter_out_im(var, shift - 1);
579 tcg_gen_rotri_i32(var, var, shift); break;
580 } else {
581 TCGv_i32 tmp = tcg_temp_new_i32();
582 tcg_gen_shli_i32(tmp, cpu_CF, 31);
583 if (flags)
584 shifter_out_im(var, 0);
585 tcg_gen_shri_i32(var, var, 1);
586 tcg_gen_or_i32(var, var, tmp);
587 tcg_temp_free_i32(tmp);
588 }
589 }
590 }
591
592 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
593 TCGv_i32 shift, int flags)
594 {
595 if (flags) {
596 switch (shiftop) {
597 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
598 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
599 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
600 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
601 }
602 } else {
603 switch (shiftop) {
604 case 0:
605 gen_shl(var, var, shift);
606 break;
607 case 1:
608 gen_shr(var, var, shift);
609 break;
610 case 2:
611 gen_sar(var, var, shift);
612 break;
613 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
614 tcg_gen_rotr_i32(var, var, shift); break;
615 }
616 }
617 tcg_temp_free_i32(shift);
618 }
619
620 #define PAS_OP(pfx) \
621 switch (op2) { \
622 case 0: gen_pas_helper(glue(pfx,add16)); break; \
623 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
624 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
625 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
626 case 4: gen_pas_helper(glue(pfx,add8)); break; \
627 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
628 }
629 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
630 {
631 TCGv_ptr tmp;
632
633 switch (op1) {
634 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
635 case 1:
636 tmp = tcg_temp_new_ptr();
637 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
638 PAS_OP(s)
639 tcg_temp_free_ptr(tmp);
640 break;
641 case 5:
642 tmp = tcg_temp_new_ptr();
643 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
644 PAS_OP(u)
645 tcg_temp_free_ptr(tmp);
646 break;
647 #undef gen_pas_helper
648 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
649 case 2:
650 PAS_OP(q);
651 break;
652 case 3:
653 PAS_OP(sh);
654 break;
655 case 6:
656 PAS_OP(uq);
657 break;
658 case 7:
659 PAS_OP(uh);
660 break;
661 #undef gen_pas_helper
662 }
663 }
664 #undef PAS_OP
665
666 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
667 #define PAS_OP(pfx) \
668 switch (op1) { \
669 case 0: gen_pas_helper(glue(pfx,add8)); break; \
670 case 1: gen_pas_helper(glue(pfx,add16)); break; \
671 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
672 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
673 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
674 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
675 }
676 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
677 {
678 TCGv_ptr tmp;
679
680 switch (op2) {
681 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
682 case 0:
683 tmp = tcg_temp_new_ptr();
684 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
685 PAS_OP(s)
686 tcg_temp_free_ptr(tmp);
687 break;
688 case 4:
689 tmp = tcg_temp_new_ptr();
690 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
691 PAS_OP(u)
692 tcg_temp_free_ptr(tmp);
693 break;
694 #undef gen_pas_helper
695 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
696 case 1:
697 PAS_OP(q);
698 break;
699 case 2:
700 PAS_OP(sh);
701 break;
702 case 5:
703 PAS_OP(uq);
704 break;
705 case 6:
706 PAS_OP(uh);
707 break;
708 #undef gen_pas_helper
709 }
710 }
711 #undef PAS_OP
712
713 /*
714 * Generate a conditional branch based on ARM condition code cc.
715 * This is common between ARM and AArch64 targets.
716 */
717 void arm_gen_test_cc(int cc, int label)
718 {
719 TCGv_i32 tmp;
720 int inv;
721
722 switch (cc) {
723 case 0: /* eq: Z */
724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
725 break;
726 case 1: /* ne: !Z */
727 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
728 break;
729 case 2: /* cs: C */
730 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
731 break;
732 case 3: /* cc: !C */
733 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
734 break;
735 case 4: /* mi: N */
736 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
737 break;
738 case 5: /* pl: !N */
739 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
740 break;
741 case 6: /* vs: V */
742 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
743 break;
744 case 7: /* vc: !V */
745 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
746 break;
747 case 8: /* hi: C && !Z */
748 inv = gen_new_label();
749 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
750 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
751 gen_set_label(inv);
752 break;
753 case 9: /* ls: !C || Z */
754 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
756 break;
757 case 10: /* ge: N == V -> N ^ V == 0 */
758 tmp = tcg_temp_new_i32();
759 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
760 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
761 tcg_temp_free_i32(tmp);
762 break;
763 case 11: /* lt: N != V -> N ^ V != 0 */
764 tmp = tcg_temp_new_i32();
765 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
766 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
767 tcg_temp_free_i32(tmp);
768 break;
769 case 12: /* gt: !Z && N == V */
770 inv = gen_new_label();
771 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
772 tmp = tcg_temp_new_i32();
773 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
774 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
775 tcg_temp_free_i32(tmp);
776 gen_set_label(inv);
777 break;
778 case 13: /* le: Z || N != V */
779 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
780 tmp = tcg_temp_new_i32();
781 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
782 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
783 tcg_temp_free_i32(tmp);
784 break;
785 default:
786 fprintf(stderr, "Bad condition code 0x%x\n", cc);
787 abort();
788 }
789 }
790
791 static const uint8_t table_logic_cc[16] = {
792 1, /* and */
793 1, /* xor */
794 0, /* sub */
795 0, /* rsb */
796 0, /* add */
797 0, /* adc */
798 0, /* sbc */
799 0, /* rsc */
800 1, /* andl */
801 1, /* xorl */
802 0, /* cmp */
803 0, /* cmn */
804 1, /* orr */
805 1, /* mov */
806 1, /* bic */
807 1, /* mvn */
808 };
809
810 /* Set PC and Thumb state from an immediate address. */
811 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
812 {
813 TCGv_i32 tmp;
814
815 s->is_jmp = DISAS_UPDATE;
816 if (s->thumb != (addr & 1)) {
817 tmp = tcg_temp_new_i32();
818 tcg_gen_movi_i32(tmp, addr & 1);
819 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
820 tcg_temp_free_i32(tmp);
821 }
822 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
823 }
824
825 /* Set PC and Thumb state from var. var is marked as dead. */
826 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
827 {
828 s->is_jmp = DISAS_UPDATE;
829 tcg_gen_andi_i32(cpu_R[15], var, ~1);
830 tcg_gen_andi_i32(var, var, 1);
831 store_cpu_field(var, thumb);
832 }
833
834 /* Variant of store_reg which uses branch&exchange logic when storing
835 to r15 in ARM architecture v7 and above. The source must be a temporary
836 and will be marked as dead. */
837 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
838 {
839 if (reg == 15 && ENABLE_ARCH_7) {
840 gen_bx(s, var);
841 } else {
842 store_reg(s, reg, var);
843 }
844 }
845
846 /* Variant of store_reg which uses branch&exchange logic when storing
847 * to r15 in ARM architecture v5T and above. This is used for storing
848 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
849 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
850 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
851 {
852 if (reg == 15 && ENABLE_ARCH_5) {
853 gen_bx(s, var);
854 } else {
855 store_reg(s, reg, var);
856 }
857 }
858
859 /* Abstractions of "generate code to do a guest load/store for
860 * AArch32", where a vaddr is always 32 bits (and is zero
861 * extended if we're a 64 bit core) and data is also
862 * 32 bits unless specifically doing a 64 bit access.
863 * These functions work like tcg_gen_qemu_{ld,st}* except
864 * that the address argument is TCGv_i32 rather than TCGv.
865 */
866 #if TARGET_LONG_BITS == 32
867
868 #define DO_GEN_LD(SUFF, OPC) \
869 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
870 { \
871 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
872 }
873
874 #define DO_GEN_ST(SUFF, OPC) \
875 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
876 { \
877 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
878 }
879
880 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
881 {
882 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
883 }
884
885 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
886 {
887 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
888 }
889
890 #else
891
892 #define DO_GEN_LD(SUFF, OPC) \
893 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
894 { \
895 TCGv addr64 = tcg_temp_new(); \
896 tcg_gen_extu_i32_i64(addr64, addr); \
897 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
898 tcg_temp_free(addr64); \
899 }
900
901 #define DO_GEN_ST(SUFF, OPC) \
902 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
903 { \
904 TCGv addr64 = tcg_temp_new(); \
905 tcg_gen_extu_i32_i64(addr64, addr); \
906 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
907 tcg_temp_free(addr64); \
908 }
909
910 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
911 {
912 TCGv addr64 = tcg_temp_new();
913 tcg_gen_extu_i32_i64(addr64, addr);
914 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
915 tcg_temp_free(addr64);
916 }
917
918 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
919 {
920 TCGv addr64 = tcg_temp_new();
921 tcg_gen_extu_i32_i64(addr64, addr);
922 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
923 tcg_temp_free(addr64);
924 }
925
926 #endif
927
928 DO_GEN_LD(8s, MO_SB)
929 DO_GEN_LD(8u, MO_UB)
930 DO_GEN_LD(16s, MO_TESW)
931 DO_GEN_LD(16u, MO_TEUW)
932 DO_GEN_LD(32u, MO_TEUL)
933 DO_GEN_ST(8, MO_UB)
934 DO_GEN_ST(16, MO_TEUW)
935 DO_GEN_ST(32, MO_TEUL)
936
937 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
938 {
939 tcg_gen_movi_i32(cpu_R[15], val);
940 }
941
942 static inline void gen_hvc(DisasContext *s, int imm16)
943 {
944 /* The pre HVC helper handles cases when HVC gets trapped
945 * as an undefined insn by runtime configuration (ie before
946 * the insn really executes).
947 */
948 gen_set_pc_im(s, s->pc - 4);
949 gen_helper_pre_hvc(cpu_env);
950 /* Otherwise we will treat this as a real exception which
951 * happens after execution of the insn. (The distinction matters
952 * for the PC value reported to the exception handler and also
953 * for single stepping.)
954 */
955 s->svc_imm = imm16;
956 gen_set_pc_im(s, s->pc);
957 s->is_jmp = DISAS_HVC;
958 }
959
960 static inline void gen_smc(DisasContext *s)
961 {
962 /* As with HVC, we may take an exception either before or after
963 * the insn executes.
964 */
965 TCGv_i32 tmp;
966
967 gen_set_pc_im(s, s->pc - 4);
968 tmp = tcg_const_i32(syn_aa32_smc());
969 gen_helper_pre_smc(cpu_env, tmp);
970 tcg_temp_free_i32(tmp);
971 gen_set_pc_im(s, s->pc);
972 s->is_jmp = DISAS_SMC;
973 }
974
975 static inline void
976 gen_set_condexec (DisasContext *s)
977 {
978 if (s->condexec_mask) {
979 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
980 TCGv_i32 tmp = tcg_temp_new_i32();
981 tcg_gen_movi_i32(tmp, val);
982 store_cpu_field(tmp, condexec_bits);
983 }
984 }
985
986 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
987 {
988 gen_set_condexec(s);
989 gen_set_pc_im(s, s->pc - offset);
990 gen_exception_internal(excp);
991 s->is_jmp = DISAS_JUMP;
992 }
993
994 static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
995 {
996 gen_set_condexec(s);
997 gen_set_pc_im(s, s->pc - offset);
998 gen_exception(excp, syn);
999 s->is_jmp = DISAS_JUMP;
1000 }
1001
1002 /* Force a TB lookup after an instruction that changes the CPU state. */
1003 static inline void gen_lookup_tb(DisasContext *s)
1004 {
1005 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1006 s->is_jmp = DISAS_UPDATE;
1007 }
1008
1009 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1010 TCGv_i32 var)
1011 {
1012 int val, rm, shift, shiftop;
1013 TCGv_i32 offset;
1014
1015 if (!(insn & (1 << 25))) {
1016 /* immediate */
1017 val = insn & 0xfff;
1018 if (!(insn & (1 << 23)))
1019 val = -val;
1020 if (val != 0)
1021 tcg_gen_addi_i32(var, var, val);
1022 } else {
1023 /* shift/register */
1024 rm = (insn) & 0xf;
1025 shift = (insn >> 7) & 0x1f;
1026 shiftop = (insn >> 5) & 3;
1027 offset = load_reg(s, rm);
1028 gen_arm_shift_im(offset, shiftop, shift, 0);
1029 if (!(insn & (1 << 23)))
1030 tcg_gen_sub_i32(var, var, offset);
1031 else
1032 tcg_gen_add_i32(var, var, offset);
1033 tcg_temp_free_i32(offset);
1034 }
1035 }
1036
1037 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1038 int extra, TCGv_i32 var)
1039 {
1040 int val, rm;
1041 TCGv_i32 offset;
1042
1043 if (insn & (1 << 22)) {
1044 /* immediate */
1045 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1046 if (!(insn & (1 << 23)))
1047 val = -val;
1048 val += extra;
1049 if (val != 0)
1050 tcg_gen_addi_i32(var, var, val);
1051 } else {
1052 /* register */
1053 if (extra)
1054 tcg_gen_addi_i32(var, var, extra);
1055 rm = (insn) & 0xf;
1056 offset = load_reg(s, rm);
1057 if (!(insn & (1 << 23)))
1058 tcg_gen_sub_i32(var, var, offset);
1059 else
1060 tcg_gen_add_i32(var, var, offset);
1061 tcg_temp_free_i32(offset);
1062 }
1063 }
1064
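/* Return a pointer to the float_status to use: the Neon "standard FP"
* status if neon is nonzero, otherwise the normal VFP fp_status. */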
1065 static TCGv_ptr get_fpstatus_ptr(int neon)
1066 {
1067 TCGv_ptr statusptr = tcg_temp_new_ptr();
1068 int offset;
1069 if (neon) {
1070 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1071 } else {
1072 offset = offsetof(CPUARMState, vfp.fp_status);
1073 }
1074 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1075 return statusptr;
1076 }
1077
1078 #define VFP_OP2(name) \
1079 static inline void gen_vfp_##name(int dp) \
1080 { \
1081 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1082 if (dp) { \
1083 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1084 } else { \
1085 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1086 } \
1087 tcg_temp_free_ptr(fpst); \
1088 }
1089
1090 VFP_OP2(add)
1091 VFP_OP2(sub)
1092 VFP_OP2(mul)
1093 VFP_OP2(div)
1094
1095 #undef VFP_OP2
1096
1097 static inline void gen_vfp_F1_mul(int dp)
1098 {
1099 /* Like gen_vfp_mul() but put result in F1 */
1100 TCGv_ptr fpst = get_fpstatus_ptr(0);
1101 if (dp) {
1102 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1103 } else {
1104 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1105 }
1106 tcg_temp_free_ptr(fpst);
1107 }
1108
1109 static inline void gen_vfp_F1_neg(int dp)
1110 {
1111 /* Like gen_vfp_neg() but put result in F1 */
1112 if (dp) {
1113 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1114 } else {
1115 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1116 }
1117 }
1118
1119 static inline void gen_vfp_abs(int dp)
1120 {
1121 if (dp)
1122 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1123 else
1124 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1125 }
1126
1127 static inline void gen_vfp_neg(int dp)
1128 {
1129 if (dp)
1130 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1131 else
1132 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1133 }
1134
1135 static inline void gen_vfp_sqrt(int dp)
1136 {
1137 if (dp)
1138 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1139 else
1140 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1141 }
1142
1143 static inline void gen_vfp_cmp(int dp)
1144 {
1145 if (dp)
1146 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1147 else
1148 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1149 }
1150
1151 static inline void gen_vfp_cmpe(int dp)
1152 {
1153 if (dp)
1154 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1155 else
1156 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1157 }
1158
1159 static inline void gen_vfp_F1_ld0(int dp)
1160 {
1161 if (dp)
1162 tcg_gen_movi_i64(cpu_F1d, 0);
1163 else
1164 tcg_gen_movi_i32(cpu_F1s, 0);
1165 }
1166
1167 #define VFP_GEN_ITOF(name) \
1168 static inline void gen_vfp_##name(int dp, int neon) \
1169 { \
1170 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1171 if (dp) { \
1172 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1173 } else { \
1174 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1175 } \
1176 tcg_temp_free_ptr(statusptr); \
1177 }
1178
1179 VFP_GEN_ITOF(uito)
1180 VFP_GEN_ITOF(sito)
1181 #undef VFP_GEN_ITOF
1182
1183 #define VFP_GEN_FTOI(name) \
1184 static inline void gen_vfp_##name(int dp, int neon) \
1185 { \
1186 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1187 if (dp) { \
1188 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1189 } else { \
1190 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1191 } \
1192 tcg_temp_free_ptr(statusptr); \
1193 }
1194
1195 VFP_GEN_FTOI(toui)
1196 VFP_GEN_FTOI(touiz)
1197 VFP_GEN_FTOI(tosi)
1198 VFP_GEN_FTOI(tosiz)
1199 #undef VFP_GEN_FTOI
1200
1201 #define VFP_GEN_FIX(name, round) \
1202 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1203 { \
1204 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1205 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1206 if (dp) { \
1207 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1208 statusptr); \
1209 } else { \
1210 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1211 statusptr); \
1212 } \
1213 tcg_temp_free_i32(tmp_shift); \
1214 tcg_temp_free_ptr(statusptr); \
1215 }
1216 VFP_GEN_FIX(tosh, _round_to_zero)
1217 VFP_GEN_FIX(tosl, _round_to_zero)
1218 VFP_GEN_FIX(touh, _round_to_zero)
1219 VFP_GEN_FIX(toul, _round_to_zero)
1220 VFP_GEN_FIX(shto, )
1221 VFP_GEN_FIX(slto, )
1222 VFP_GEN_FIX(uhto, )
1223 VFP_GEN_FIX(ulto, )
1224 #undef VFP_GEN_FIX
1225
1226 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1227 {
1228 if (dp) {
1229 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
1230 } else {
1231 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
1232 }
1233 }
1234
1235 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1236 {
1237 if (dp) {
1238 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
1239 } else {
1240 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
1241 }
1242 }
1243
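/* Offset within CPUARMState of VFP register 'reg': a D register if dp is
* set, otherwise an S register held in half of a D register. */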
1244 static inline long
1245 vfp_reg_offset (int dp, int reg)
1246 {
1247 if (dp)
1248 return offsetof(CPUARMState, vfp.regs[reg]);
1249 else if (reg & 1) {
1250 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1251 + offsetof(CPU_DoubleU, l.upper);
1252 } else {
1253 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1254 + offsetof(CPU_DoubleU, l.lower);
1255 }
1256 }
1257
1258 /* Return the offset of a 32-bit piece of a NEON register.
1259 zero is the least significant end of the register. */
1260 static inline long
1261 neon_reg_offset (int reg, int n)
1262 {
1263 int sreg;
1264 sreg = reg * 2 + n;
1265 return vfp_reg_offset(0, sreg);
1266 }
1267
1268 static TCGv_i32 neon_load_reg(int reg, int pass)
1269 {
1270 TCGv_i32 tmp = tcg_temp_new_i32();
1271 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1272 return tmp;
1273 }
1274
1275 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1276 {
1277 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1278 tcg_temp_free_i32(var);
1279 }
1280
1281 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1282 {
1283 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1284 }
1285
1286 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1287 {
1288 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1289 }
1290
1291 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1292 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1293 #define tcg_gen_st_f32 tcg_gen_st_i32
1294 #define tcg_gen_st_f64 tcg_gen_st_i64
1295
1296 static inline void gen_mov_F0_vreg(int dp, int reg)
1297 {
1298 if (dp)
1299 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1300 else
1301 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1302 }
1303
1304 static inline void gen_mov_F1_vreg(int dp, int reg)
1305 {
1306 if (dp)
1307 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1308 else
1309 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1310 }
1311
1312 static inline void gen_mov_vreg_F0(int dp, int reg)
1313 {
1314 if (dp)
1315 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1316 else
1317 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1318 }
1319
1320 #define ARM_CP_RW_BIT (1 << 20)
1321
1322 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1323 {
1324 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1325 }
1326
1327 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1328 {
1329 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1330 }
1331
1332 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1333 {
1334 TCGv_i32 var = tcg_temp_new_i32();
1335 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1336 return var;
1337 }
1338
1339 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1340 {
1341 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1342 tcg_temp_free_i32(var);
1343 }
1344
1345 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1346 {
1347 iwmmxt_store_reg(cpu_M0, rn);
1348 }
1349
1350 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1351 {
1352 iwmmxt_load_reg(cpu_M0, rn);
1353 }
1354
1355 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1356 {
1357 iwmmxt_load_reg(cpu_V1, rn);
1358 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1359 }
1360
1361 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1362 {
1363 iwmmxt_load_reg(cpu_V1, rn);
1364 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1365 }
1366
1367 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1368 {
1369 iwmmxt_load_reg(cpu_V1, rn);
1370 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1371 }
1372
1373 #define IWMMXT_OP(name) \
1374 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1375 { \
1376 iwmmxt_load_reg(cpu_V1, rn); \
1377 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1378 }
1379
1380 #define IWMMXT_OP_ENV(name) \
1381 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1382 { \
1383 iwmmxt_load_reg(cpu_V1, rn); \
1384 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1385 }
1386
1387 #define IWMMXT_OP_ENV_SIZE(name) \
1388 IWMMXT_OP_ENV(name##b) \
1389 IWMMXT_OP_ENV(name##w) \
1390 IWMMXT_OP_ENV(name##l)
1391
1392 #define IWMMXT_OP_ENV1(name) \
1393 static inline void gen_op_iwmmxt_##name##_M0(void) \
1394 { \
1395 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1396 }
1397
1398 IWMMXT_OP(maddsq)
1399 IWMMXT_OP(madduq)
1400 IWMMXT_OP(sadb)
1401 IWMMXT_OP(sadw)
1402 IWMMXT_OP(mulslw)
1403 IWMMXT_OP(mulshw)
1404 IWMMXT_OP(mululw)
1405 IWMMXT_OP(muluhw)
1406 IWMMXT_OP(macsw)
1407 IWMMXT_OP(macuw)
1408
1409 IWMMXT_OP_ENV_SIZE(unpackl)
1410 IWMMXT_OP_ENV_SIZE(unpackh)
1411
1412 IWMMXT_OP_ENV1(unpacklub)
1413 IWMMXT_OP_ENV1(unpackluw)
1414 IWMMXT_OP_ENV1(unpacklul)
1415 IWMMXT_OP_ENV1(unpackhub)
1416 IWMMXT_OP_ENV1(unpackhuw)
1417 IWMMXT_OP_ENV1(unpackhul)
1418 IWMMXT_OP_ENV1(unpacklsb)
1419 IWMMXT_OP_ENV1(unpacklsw)
1420 IWMMXT_OP_ENV1(unpacklsl)
1421 IWMMXT_OP_ENV1(unpackhsb)
1422 IWMMXT_OP_ENV1(unpackhsw)
1423 IWMMXT_OP_ENV1(unpackhsl)
1424
1425 IWMMXT_OP_ENV_SIZE(cmpeq)
1426 IWMMXT_OP_ENV_SIZE(cmpgtu)
1427 IWMMXT_OP_ENV_SIZE(cmpgts)
1428
1429 IWMMXT_OP_ENV_SIZE(mins)
1430 IWMMXT_OP_ENV_SIZE(minu)
1431 IWMMXT_OP_ENV_SIZE(maxs)
1432 IWMMXT_OP_ENV_SIZE(maxu)
1433
1434 IWMMXT_OP_ENV_SIZE(subn)
1435 IWMMXT_OP_ENV_SIZE(addn)
1436 IWMMXT_OP_ENV_SIZE(subu)
1437 IWMMXT_OP_ENV_SIZE(addu)
1438 IWMMXT_OP_ENV_SIZE(subs)
1439 IWMMXT_OP_ENV_SIZE(adds)
1440
1441 IWMMXT_OP_ENV(avgb0)
1442 IWMMXT_OP_ENV(avgb1)
1443 IWMMXT_OP_ENV(avgw0)
1444 IWMMXT_OP_ENV(avgw1)
1445
1446 IWMMXT_OP_ENV(packuw)
1447 IWMMXT_OP_ENV(packul)
1448 IWMMXT_OP_ENV(packuq)
1449 IWMMXT_OP_ENV(packsw)
1450 IWMMXT_OP_ENV(packsl)
1451 IWMMXT_OP_ENV(packsq)
1452
1453 static void gen_op_iwmmxt_set_mup(void)
1454 {
1455 TCGv_i32 tmp;
1456 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1457 tcg_gen_ori_i32(tmp, tmp, 2);
1458 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1459 }
1460
1461 static void gen_op_iwmmxt_set_cup(void)
1462 {
1463 TCGv_i32 tmp;
1464 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1465 tcg_gen_ori_i32(tmp, tmp, 1);
1466 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1467 }
1468
1469 static void gen_op_iwmmxt_setpsr_nz(void)
1470 {
1471 TCGv_i32 tmp = tcg_temp_new_i32();
1472 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1473 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1474 }
1475
1476 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1477 {
1478 iwmmxt_load_reg(cpu_V1, rn);
1479 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1480 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1481 }
1482
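/* Compute the address for an iwMMXt load/store into 'dest', handling
* pre/post indexing and optional base writeback; return nonzero for an
* invalid addressing mode. */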
1483 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1484 TCGv_i32 dest)
1485 {
1486 int rd;
1487 uint32_t offset;
1488 TCGv_i32 tmp;
1489
1490 rd = (insn >> 16) & 0xf;
1491 tmp = load_reg(s, rd);
1492
1493 offset = (insn & 0xff) << ((insn >> 7) & 2);
1494 if (insn & (1 << 24)) {
1495 /* Pre indexed */
1496 if (insn & (1 << 23))
1497 tcg_gen_addi_i32(tmp, tmp, offset);
1498 else
1499 tcg_gen_addi_i32(tmp, tmp, -offset);
1500 tcg_gen_mov_i32(dest, tmp);
1501 if (insn & (1 << 21))
1502 store_reg(s, rd, tmp);
1503 else
1504 tcg_temp_free_i32(tmp);
1505 } else if (insn & (1 << 21)) {
1506 /* Post indexed */
1507 tcg_gen_mov_i32(dest, tmp);
1508 if (insn & (1 << 23))
1509 tcg_gen_addi_i32(tmp, tmp, offset);
1510 else
1511 tcg_gen_addi_i32(tmp, tmp, -offset);
1512 store_reg(s, rd, tmp);
1513 } else if (!(insn & (1 << 23)))
1514 return 1;
1515 return 0;
1516 }
1517
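/* Fetch an iwMMXt shift amount into 'dest': from wCGR0..wCGR3 if bit 8 of
* the insn is set, otherwise from the low half of wRd, masked with 'mask'.
* Return nonzero for an invalid register. */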
1518 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1519 {
1520 int rd = (insn >> 0) & 0xf;
1521 TCGv_i32 tmp;
1522
1523 if (insn & (1 << 8)) {
1524 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1525 return 1;
1526 } else {
1527 tmp = iwmmxt_load_creg(rd);
1528 }
1529 } else {
1530 tmp = tcg_temp_new_i32();
1531 iwmmxt_load_reg(cpu_V0, rd);
1532 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1533 }
1534 tcg_gen_andi_i32(tmp, tmp, mask);
1535 tcg_gen_mov_i32(dest, tmp);
1536 tcg_temp_free_i32(tmp);
1537 return 0;
1538 }
1539
1540 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1541 (i.e. an undefined instruction). */
1542 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1543 {
1544 int rd, wrd;
1545 int rdhi, rdlo, rd0, rd1, i;
1546 TCGv_i32 addr;
1547 TCGv_i32 tmp, tmp2, tmp3;
1548
1549 if ((insn & 0x0e000e00) == 0x0c000000) {
1550 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1551 wrd = insn & 0xf;
1552 rdlo = (insn >> 12) & 0xf;
1553 rdhi = (insn >> 16) & 0xf;
1554 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1555 iwmmxt_load_reg(cpu_V0, wrd);
1556 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1557 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1558 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1559 } else { /* TMCRR */
1560 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1561 iwmmxt_store_reg(cpu_V0, wrd);
1562 gen_op_iwmmxt_set_mup();
1563 }
1564 return 0;
1565 }
1566
1567 wrd = (insn >> 12) & 0xf;
1568 addr = tcg_temp_new_i32();
1569 if (gen_iwmmxt_address(s, insn, addr)) {
1570 tcg_temp_free_i32(addr);
1571 return 1;
1572 }
1573 if (insn & ARM_CP_RW_BIT) {
1574 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1575 tmp = tcg_temp_new_i32();
1576 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1577 iwmmxt_store_creg(wrd, tmp);
1578 } else {
1579 i = 1;
1580 if (insn & (1 << 8)) {
1581 if (insn & (1 << 22)) { /* WLDRD */
1582 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
1583 i = 0;
1584 } else { /* WLDRW wRd */
1585 tmp = tcg_temp_new_i32();
1586 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1587 }
1588 } else {
1589 tmp = tcg_temp_new_i32();
1590 if (insn & (1 << 22)) { /* WLDRH */
1591 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
1592 } else { /* WLDRB */
1593 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
1594 }
1595 }
1596 if (i) {
1597 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1598 tcg_temp_free_i32(tmp);
1599 }
1600 gen_op_iwmmxt_movq_wRn_M0(wrd);
1601 }
1602 } else {
1603 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1604 tmp = iwmmxt_load_creg(wrd);
1605 gen_aa32_st32(tmp, addr, get_mem_index(s));
1606 } else {
1607 gen_op_iwmmxt_movq_M0_wRn(wrd);
1608 tmp = tcg_temp_new_i32();
1609 if (insn & (1 << 8)) {
1610 if (insn & (1 << 22)) { /* WSTRD */
1611 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
1612 } else { /* WSTRW wRd */
1613 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1614 gen_aa32_st32(tmp, addr, get_mem_index(s));
1615 }
1616 } else {
1617 if (insn & (1 << 22)) { /* WSTRH */
1618 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1619 gen_aa32_st16(tmp, addr, get_mem_index(s));
1620 } else { /* WSTRB */
1621 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1622 gen_aa32_st8(tmp, addr, get_mem_index(s));
1623 }
1624 }
1625 }
1626 tcg_temp_free_i32(tmp);
1627 }
1628 tcg_temp_free_i32(addr);
1629 return 0;
1630 }
1631
1632 if ((insn & 0x0f000000) != 0x0e000000)
1633 return 1;
1634
1635 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1636 case 0x000: /* WOR */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 0) & 0xf;
1639 rd1 = (insn >> 16) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
1641 gen_op_iwmmxt_orq_M0_wRn(rd1);
1642 gen_op_iwmmxt_setpsr_nz();
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 gen_op_iwmmxt_set_cup();
1646 break;
1647 case 0x011: /* TMCR */
1648 if (insn & 0xf)
1649 return 1;
1650 rd = (insn >> 12) & 0xf;
1651 wrd = (insn >> 16) & 0xf;
1652 switch (wrd) {
1653 case ARM_IWMMXT_wCID:
1654 case ARM_IWMMXT_wCASF:
1655 break;
1656 case ARM_IWMMXT_wCon:
1657 gen_op_iwmmxt_set_cup();
1658 /* Fall through. */
1659 case ARM_IWMMXT_wCSSF:
1660 tmp = iwmmxt_load_creg(wrd);
1661 tmp2 = load_reg(s, rd);
1662 tcg_gen_andc_i32(tmp, tmp, tmp2);
1663 tcg_temp_free_i32(tmp2);
1664 iwmmxt_store_creg(wrd, tmp);
1665 break;
1666 case ARM_IWMMXT_wCGR0:
1667 case ARM_IWMMXT_wCGR1:
1668 case ARM_IWMMXT_wCGR2:
1669 case ARM_IWMMXT_wCGR3:
1670 gen_op_iwmmxt_set_cup();
1671 tmp = load_reg(s, rd);
1672 iwmmxt_store_creg(wrd, tmp);
1673 break;
1674 default:
1675 return 1;
1676 }
1677 break;
1678 case 0x100: /* WXOR */
1679 wrd = (insn >> 12) & 0xf;
1680 rd0 = (insn >> 0) & 0xf;
1681 rd1 = (insn >> 16) & 0xf;
1682 gen_op_iwmmxt_movq_M0_wRn(rd0);
1683 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1684 gen_op_iwmmxt_setpsr_nz();
1685 gen_op_iwmmxt_movq_wRn_M0(wrd);
1686 gen_op_iwmmxt_set_mup();
1687 gen_op_iwmmxt_set_cup();
1688 break;
1689 case 0x111: /* TMRC */
1690 if (insn & 0xf)
1691 return 1;
1692 rd = (insn >> 12) & 0xf;
1693 wrd = (insn >> 16) & 0xf;
1694 tmp = iwmmxt_load_creg(wrd);
1695 store_reg(s, rd, tmp);
1696 break;
1697 case 0x300: /* WANDN */
1698 wrd = (insn >> 12) & 0xf;
1699 rd0 = (insn >> 0) & 0xf;
1700 rd1 = (insn >> 16) & 0xf;
1701 gen_op_iwmmxt_movq_M0_wRn(rd0);
1702 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1703 gen_op_iwmmxt_andq_M0_wRn(rd1);
1704 gen_op_iwmmxt_setpsr_nz();
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 gen_op_iwmmxt_set_cup();
1708 break;
1709 case 0x200: /* WAND */
1710 wrd = (insn >> 12) & 0xf;
1711 rd0 = (insn >> 0) & 0xf;
1712 rd1 = (insn >> 16) & 0xf;
1713 gen_op_iwmmxt_movq_M0_wRn(rd0);
1714 gen_op_iwmmxt_andq_M0_wRn(rd1);
1715 gen_op_iwmmxt_setpsr_nz();
1716 gen_op_iwmmxt_movq_wRn_M0(wrd);
1717 gen_op_iwmmxt_set_mup();
1718 gen_op_iwmmxt_set_cup();
1719 break;
1720 case 0x810: case 0xa10: /* WMADD */
1721 wrd = (insn >> 12) & 0xf;
1722 rd0 = (insn >> 0) & 0xf;
1723 rd1 = (insn >> 16) & 0xf;
1724 gen_op_iwmmxt_movq_M0_wRn(rd0);
1725 if (insn & (1 << 21))
1726 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1727 else
1728 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1731 break;
1732 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1733 wrd = (insn >> 12) & 0xf;
1734 rd0 = (insn >> 16) & 0xf;
1735 rd1 = (insn >> 0) & 0xf;
1736 gen_op_iwmmxt_movq_M0_wRn(rd0);
1737 switch ((insn >> 22) & 3) {
1738 case 0:
1739 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1740 break;
1741 case 1:
1742 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1743 break;
1744 case 2:
1745 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1746 break;
1747 case 3:
1748 return 1;
1749 }
1750 gen_op_iwmmxt_movq_wRn_M0(wrd);
1751 gen_op_iwmmxt_set_mup();
1752 gen_op_iwmmxt_set_cup();
1753 break;
1754 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1755 wrd = (insn >> 12) & 0xf;
1756 rd0 = (insn >> 16) & 0xf;
1757 rd1 = (insn >> 0) & 0xf;
1758 gen_op_iwmmxt_movq_M0_wRn(rd0);
1759 switch ((insn >> 22) & 3) {
1760 case 0:
1761 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1762 break;
1763 case 1:
1764 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1765 break;
1766 case 2:
1767 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1768 break;
1769 case 3:
1770 return 1;
1771 }
1772 gen_op_iwmmxt_movq_wRn_M0(wrd);
1773 gen_op_iwmmxt_set_mup();
1774 gen_op_iwmmxt_set_cup();
1775 break;
1776 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1777 wrd = (insn >> 12) & 0xf;
1778 rd0 = (insn >> 16) & 0xf;
1779 rd1 = (insn >> 0) & 0xf;
1780 gen_op_iwmmxt_movq_M0_wRn(rd0);
1781 if (insn & (1 << 22))
1782 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1783 else
1784 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1785 if (!(insn & (1 << 20)))
1786 gen_op_iwmmxt_addl_M0_wRn(wrd);
1787 gen_op_iwmmxt_movq_wRn_M0(wrd);
1788 gen_op_iwmmxt_set_mup();
1789 break;
1790 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1791 wrd = (insn >> 12) & 0xf;
1792 rd0 = (insn >> 16) & 0xf;
1793 rd1 = (insn >> 0) & 0xf;
1794 gen_op_iwmmxt_movq_M0_wRn(rd0);
1795 if (insn & (1 << 21)) {
1796 if (insn & (1 << 20))
1797 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1798 else
1799 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1800 } else {
1801 if (insn & (1 << 20))
1802 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1803 else
1804 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1805 }
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 rd1 = (insn >> 0) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
1814 if (insn & (1 << 21))
1815 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1816 else
1817 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1818 if (!(insn & (1 << 20))) {
1819 iwmmxt_load_reg(cpu_V1, wrd);
1820 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1821 }
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 gen_op_iwmmxt_set_mup();
1824 break;
1825 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1826 wrd = (insn >> 12) & 0xf;
1827 rd0 = (insn >> 16) & 0xf;
1828 rd1 = (insn >> 0) & 0xf;
1829 gen_op_iwmmxt_movq_M0_wRn(rd0);
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1833 break;
1834 case 1:
1835 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1836 break;
1837 case 2:
1838 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1839 break;
1840 case 3:
1841 return 1;
1842 }
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 gen_op_iwmmxt_set_cup();
1846 break;
1847 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 16) & 0xf;
1850 rd1 = (insn >> 0) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 if (insn & (1 << 22)) {
1853 if (insn & (1 << 20))
1854 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1855 else
1856 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1857 } else {
1858 if (insn & (1 << 20))
1859 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1860 else
1861 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1862 }
1863 gen_op_iwmmxt_movq_wRn_M0(wrd);
1864 gen_op_iwmmxt_set_mup();
1865 gen_op_iwmmxt_set_cup();
1866 break;
1867 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1868 wrd = (insn >> 12) & 0xf;
1869 rd0 = (insn >> 16) & 0xf;
1870 rd1 = (insn >> 0) & 0xf;
1871 gen_op_iwmmxt_movq_M0_wRn(rd0);
1872 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1873 tcg_gen_andi_i32(tmp, tmp, 7);
1874 iwmmxt_load_reg(cpu_V1, rd1);
1875 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1876 tcg_temp_free_i32(tmp);
1877 gen_op_iwmmxt_movq_wRn_M0(wrd);
1878 gen_op_iwmmxt_set_mup();
1879 break;
1880 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1881 if (((insn >> 6) & 3) == 3)
1882 return 1;
1883 rd = (insn >> 12) & 0xf;
1884 wrd = (insn >> 16) & 0xf;
1885 tmp = load_reg(s, rd);
1886 gen_op_iwmmxt_movq_M0_wRn(wrd);
1887 switch ((insn >> 6) & 3) {
1888 case 0:
1889 tmp2 = tcg_const_i32(0xff);
1890 tmp3 = tcg_const_i32((insn & 7) << 3);
1891 break;
1892 case 1:
1893 tmp2 = tcg_const_i32(0xffff);
1894 tmp3 = tcg_const_i32((insn & 3) << 4);
1895 break;
1896 case 2:
1897 tmp2 = tcg_const_i32(0xffffffff);
1898 tmp3 = tcg_const_i32((insn & 1) << 5);
1899 break;
1900 default:
1901 TCGV_UNUSED_I32(tmp2);
1902 TCGV_UNUSED_I32(tmp3);
1903 }
1904 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1905 tcg_temp_free_i32(tmp3);
1906 tcg_temp_free_i32(tmp2);
1907 tcg_temp_free_i32(tmp);
1908 gen_op_iwmmxt_movq_wRn_M0(wrd);
1909 gen_op_iwmmxt_set_mup();
1910 break;
1911 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1912 rd = (insn >> 12) & 0xf;
1913 wrd = (insn >> 16) & 0xf;
1914 if (rd == 15 || ((insn >> 22) & 3) == 3)
1915 return 1;
1916 gen_op_iwmmxt_movq_M0_wRn(wrd);
1917 tmp = tcg_temp_new_i32();
1918 switch ((insn >> 22) & 3) {
1919 case 0:
1920 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1921 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1922 if (insn & 8) {
1923 tcg_gen_ext8s_i32(tmp, tmp);
1924 } else {
1925 tcg_gen_andi_i32(tmp, tmp, 0xff);
1926 }
1927 break;
1928 case 1:
1929 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1930 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1931 if (insn & 8) {
1932 tcg_gen_ext16s_i32(tmp, tmp);
1933 } else {
1934 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1935 }
1936 break;
1937 case 2:
1938 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1939 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1940 break;
1941 }
1942 store_reg(s, rd, tmp);
1943 break;
1944 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1945 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1946 return 1;
1947 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1948 switch ((insn >> 22) & 3) {
1949 case 0:
1950 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1951 break;
1952 case 1:
1953 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1954 break;
1955 case 2:
1956 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1957 break;
1958 }
1959 tcg_gen_shli_i32(tmp, tmp, 28);
1960 gen_set_nzcv(tmp);
1961 tcg_temp_free_i32(tmp);
1962 break;
1963 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1964 if (((insn >> 6) & 3) == 3)
1965 return 1;
1966 rd = (insn >> 12) & 0xf;
1967 wrd = (insn >> 16) & 0xf;
1968 tmp = load_reg(s, rd);
1969 switch ((insn >> 6) & 3) {
1970 case 0:
1971 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1972 break;
1973 case 1:
1974 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1975 break;
1976 case 2:
1977 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1978 break;
1979 }
1980 tcg_temp_free_i32(tmp);
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1983 break;
1984 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1985 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1986 return 1;
1987 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1988 tmp2 = tcg_temp_new_i32();
1989 tcg_gen_mov_i32(tmp2, tmp);
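/* TANDC: AND together the per-element flag fields of wCASF by repeatedly shifting a copy up by one element and ANDing, so the top bits accumulate the AND of every element before gen_set_nzcv copies bits [31:28] to CPSR. */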
1990 switch ((insn >> 22) & 3) {
1991 case 0:
1992 for (i = 0; i < 7; i ++) {
1993 tcg_gen_shli_i32(tmp2, tmp2, 4);
1994 tcg_gen_and_i32(tmp, tmp, tmp2);
1995 }
1996 break;
1997 case 1:
1998 for (i = 0; i < 3; i ++) {
1999 tcg_gen_shli_i32(tmp2, tmp2, 8);
2000 tcg_gen_and_i32(tmp, tmp, tmp2);
2001 }
2002 break;
2003 case 2:
2004 tcg_gen_shli_i32(tmp2, tmp2, 16);
2005 tcg_gen_and_i32(tmp, tmp, tmp2);
2006 break;
2007 }
2008 gen_set_nzcv(tmp);
2009 tcg_temp_free_i32(tmp2);
2010 tcg_temp_free_i32(tmp);
2011 break;
2012 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2013 wrd = (insn >> 12) & 0xf;
2014 rd0 = (insn >> 16) & 0xf;
2015 gen_op_iwmmxt_movq_M0_wRn(rd0);
2016 switch ((insn >> 22) & 3) {
2017 case 0:
2018 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2019 break;
2020 case 1:
2021 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2022 break;
2023 case 2:
2024 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2025 break;
2026 case 3:
2027 return 1;
2028 }
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 break;
2032 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2033 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2034 return 1;
2035 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2036 tmp2 = tcg_temp_new_i32();
2037 tcg_gen_mov_i32(tmp2, tmp);
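/* TORC: as for TANDC, but the per-element flag fields of wCASF are ORed together before NZCV is set from the top bits. */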
2038 switch ((insn >> 22) & 3) {
2039 case 0:
2040 for (i = 0; i < 7; i ++) {
2041 tcg_gen_shli_i32(tmp2, tmp2, 4);
2042 tcg_gen_or_i32(tmp, tmp, tmp2);
2043 }
2044 break;
2045 case 1:
2046 for (i = 0; i < 3; i ++) {
2047 tcg_gen_shli_i32(tmp2, tmp2, 8);
2048 tcg_gen_or_i32(tmp, tmp, tmp2);
2049 }
2050 break;
2051 case 2:
2052 tcg_gen_shli_i32(tmp2, tmp2, 16);
2053 tcg_gen_or_i32(tmp, tmp, tmp2);
2054 break;
2055 }
2056 gen_set_nzcv(tmp);
2057 tcg_temp_free_i32(tmp2);
2058 tcg_temp_free_i32(tmp);
2059 break;
2060 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2061 rd = (insn >> 12) & 0xf;
2062 rd0 = (insn >> 16) & 0xf;
2063 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2064 return 1;
2065 gen_op_iwmmxt_movq_M0_wRn(rd0);
2066 tmp = tcg_temp_new_i32();
2067 switch ((insn >> 22) & 3) {
2068 case 0:
2069 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2070 break;
2071 case 1:
2072 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2073 break;
2074 case 2:
2075 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2076 break;
2077 }
2078 store_reg(s, rd, tmp);
2079 break;
2080 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2081 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2082 wrd = (insn >> 12) & 0xf;
2083 rd0 = (insn >> 16) & 0xf;
2084 rd1 = (insn >> 0) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
2086 switch ((insn >> 22) & 3) {
2087 case 0:
2088 if (insn & (1 << 21))
2089 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2090 else
2091 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2092 break;
2093 case 1:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2098 break;
2099 case 2:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2104 break;
2105 case 3:
2106 return 1;
2107 }
2108 gen_op_iwmmxt_movq_wRn_M0(wrd);
2109 gen_op_iwmmxt_set_mup();
2110 gen_op_iwmmxt_set_cup();
2111 break;
2112 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2113 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2114 wrd = (insn >> 12) & 0xf;
2115 rd0 = (insn >> 16) & 0xf;
2116 gen_op_iwmmxt_movq_M0_wRn(rd0);
2117 switch ((insn >> 22) & 3) {
2118 case 0:
2119 if (insn & (1 << 21))
2120 gen_op_iwmmxt_unpacklsb_M0();
2121 else
2122 gen_op_iwmmxt_unpacklub_M0();
2123 break;
2124 case 1:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_unpacklsw_M0();
2127 else
2128 gen_op_iwmmxt_unpackluw_M0();
2129 break;
2130 case 2:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_unpacklsl_M0();
2133 else
2134 gen_op_iwmmxt_unpacklul_M0();
2135 break;
2136 case 3:
2137 return 1;
2138 }
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2144 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 switch ((insn >> 22) & 3) {
2149 case 0:
2150 if (insn & (1 << 21))
2151 gen_op_iwmmxt_unpackhsb_M0();
2152 else
2153 gen_op_iwmmxt_unpackhub_M0();
2154 break;
2155 case 1:
2156 if (insn & (1 << 21))
2157 gen_op_iwmmxt_unpackhsw_M0();
2158 else
2159 gen_op_iwmmxt_unpackhuw_M0();
2160 break;
2161 case 2:
2162 if (insn & (1 << 21))
2163 gen_op_iwmmxt_unpackhsl_M0();
2164 else
2165 gen_op_iwmmxt_unpackhul_M0();
2166 break;
2167 case 3:
2168 return 1;
2169 }
2170 gen_op_iwmmxt_movq_wRn_M0(wrd);
2171 gen_op_iwmmxt_set_mup();
2172 gen_op_iwmmxt_set_cup();
2173 break;
2174 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2175 case 0x214: case 0x614: case 0xa14: case 0xe14:
2176 if (((insn >> 22) & 3) == 0)
2177 return 1;
2178 wrd = (insn >> 12) & 0xf;
2179 rd0 = (insn >> 16) & 0xf;
2180 gen_op_iwmmxt_movq_M0_wRn(rd0);
2181 tmp = tcg_temp_new_i32();
2182 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2183 tcg_temp_free_i32(tmp);
2184 return 1;
2185 }
2186 switch ((insn >> 22) & 3) {
2187 case 1:
2188 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2189 break;
2190 case 2:
2191 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2192 break;
2193 case 3:
2194 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2195 break;
2196 }
2197 tcg_temp_free_i32(tmp);
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 gen_op_iwmmxt_set_cup();
2201 break;
2202 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2203 case 0x014: case 0x414: case 0x814: case 0xc14:
2204 if (((insn >> 22) & 3) == 0)
2205 return 1;
2206 wrd = (insn >> 12) & 0xf;
2207 rd0 = (insn >> 16) & 0xf;
2208 gen_op_iwmmxt_movq_M0_wRn(rd0);
2209 tmp = tcg_temp_new_i32();
2210 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2211 tcg_temp_free_i32(tmp);
2212 return 1;
2213 }
2214 switch ((insn >> 22) & 3) {
2215 case 1:
2216 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2217 break;
2218 case 2:
2219 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2220 break;
2221 case 3:
2222 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2223 break;
2224 }
2225 tcg_temp_free_i32(tmp);
2226 gen_op_iwmmxt_movq_wRn_M0(wrd);
2227 gen_op_iwmmxt_set_mup();
2228 gen_op_iwmmxt_set_cup();
2229 break;
2230 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2231 case 0x114: case 0x514: case 0x914: case 0xd14:
2232 if (((insn >> 22) & 3) == 0)
2233 return 1;
2234 wrd = (insn >> 12) & 0xf;
2235 rd0 = (insn >> 16) & 0xf;
2236 gen_op_iwmmxt_movq_M0_wRn(rd0);
2237 tmp = tcg_temp_new_i32();
2238 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2239 tcg_temp_free_i32(tmp);
2240 return 1;
2241 }
2242 switch ((insn >> 22) & 3) {
2243 case 1:
2244 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2245 break;
2246 case 2:
2247 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2248 break;
2249 case 3:
2250 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2251 break;
2252 }
2253 tcg_temp_free_i32(tmp);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2259 case 0x314: case 0x714: case 0xb14: case 0xf14:
2260 if (((insn >> 22) & 3) == 0)
2261 return 1;
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
2265 tmp = tcg_temp_new_i32();
2266 switch ((insn >> 22) & 3) {
2267 case 1:
2268 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2269 tcg_temp_free_i32(tmp);
2270 return 1;
2271 }
2272 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2273 break;
2274 case 2:
2275 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2276 tcg_temp_free_i32(tmp);
2277 return 1;
2278 }
2279 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2280 break;
2281 case 3:
2282 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2283 tcg_temp_free_i32(tmp);
2284 return 1;
2285 }
2286 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2287 break;
2288 }
2289 tcg_temp_free_i32(tmp);
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2295 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 switch ((insn >> 22) & 3) {
2301 case 0:
2302 if (insn & (1 << 21))
2303 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2304 else
2305 gen_op_iwmmxt_minub_M0_wRn(rd1);
2306 break;
2307 case 1:
2308 if (insn & (1 << 21))
2309 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2310 else
2311 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2312 break;
2313 case 2:
2314 if (insn & (1 << 21))
2315 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2316 else
2317 gen_op_iwmmxt_minul_M0_wRn(rd1);
2318 break;
2319 case 3:
2320 return 1;
2321 }
2322 gen_op_iwmmxt_movq_wRn_M0(wrd);
2323 gen_op_iwmmxt_set_mup();
2324 break;
2325 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2326 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2327 wrd = (insn >> 12) & 0xf;
2328 rd0 = (insn >> 16) & 0xf;
2329 rd1 = (insn >> 0) & 0xf;
2330 gen_op_iwmmxt_movq_M0_wRn(rd0);
2331 switch ((insn >> 22) & 3) {
2332 case 0:
2333 if (insn & (1 << 21))
2334 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2335 else
2336 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2337 break;
2338 case 1:
2339 if (insn & (1 << 21))
2340 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2341 else
2342 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2343 break;
2344 case 2:
2345 if (insn & (1 << 21))
2346 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2347 else
2348 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2349 break;
2350 case 3:
2351 return 1;
2352 }
2353 gen_op_iwmmxt_movq_wRn_M0(wrd);
2354 gen_op_iwmmxt_set_mup();
2355 break;
2356 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2357 case 0x402: case 0x502: case 0x602: case 0x702:
2358 wrd = (insn >> 12) & 0xf;
2359 rd0 = (insn >> 16) & 0xf;
2360 rd1 = (insn >> 0) & 0xf;
2361 gen_op_iwmmxt_movq_M0_wRn(rd0);
2362 tmp = tcg_const_i32((insn >> 20) & 3);
2363 iwmmxt_load_reg(cpu_V1, rd1);
2364 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2365 tcg_temp_free_i32(tmp);
2366 gen_op_iwmmxt_movq_wRn_M0(wrd);
2367 gen_op_iwmmxt_set_mup();
2368 break;
2369 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2370 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2371 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2372 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2373 wrd = (insn >> 12) & 0xf;
2374 rd0 = (insn >> 16) & 0xf;
2375 rd1 = (insn >> 0) & 0xf;
2376 gen_op_iwmmxt_movq_M0_wRn(rd0);
2377 switch ((insn >> 20) & 0xf) {
2378 case 0x0:
2379 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2380 break;
2381 case 0x1:
2382 gen_op_iwmmxt_subub_M0_wRn(rd1);
2383 break;
2384 case 0x3:
2385 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2386 break;
2387 case 0x4:
2388 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2389 break;
2390 case 0x5:
2391 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2392 break;
2393 case 0x7:
2394 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2395 break;
2396 case 0x8:
2397 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2398 break;
2399 case 0x9:
2400 gen_op_iwmmxt_subul_M0_wRn(rd1);
2401 break;
2402 case 0xb:
2403 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2404 break;
2405 default:
2406 return 1;
2407 }
2408 gen_op_iwmmxt_movq_wRn_M0(wrd);
2409 gen_op_iwmmxt_set_mup();
2410 gen_op_iwmmxt_set_cup();
2411 break;
2412 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2413 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2414 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2415 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 gen_op_iwmmxt_movq_M0_wRn(rd0);
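/* WSHUFH: the 8-bit shuffle control immediate is split across insn bits [23:20] and [3:0]. */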
2419 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2420 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2421 tcg_temp_free_i32(tmp);
2422 gen_op_iwmmxt_movq_wRn_M0(wrd);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2425 break;
2426 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2427 case 0x418: case 0x518: case 0x618: case 0x718:
2428 case 0x818: case 0x918: case 0xa18: case 0xb18:
2429 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2430 wrd = (insn >> 12) & 0xf;
2431 rd0 = (insn >> 16) & 0xf;
2432 rd1 = (insn >> 0) & 0xf;
2433 gen_op_iwmmxt_movq_M0_wRn(rd0);
2434 switch ((insn >> 20) & 0xf) {
2435 case 0x0:
2436 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2437 break;
2438 case 0x1:
2439 gen_op_iwmmxt_addub_M0_wRn(rd1);
2440 break;
2441 case 0x3:
2442 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2443 break;
2444 case 0x4:
2445 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2446 break;
2447 case 0x5:
2448 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2449 break;
2450 case 0x7:
2451 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2452 break;
2453 case 0x8:
2454 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2455 break;
2456 case 0x9:
2457 gen_op_iwmmxt_addul_M0_wRn(rd1);
2458 break;
2459 case 0xb:
2460 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2461 break;
2462 default:
2463 return 1;
2464 }
2465 gen_op_iwmmxt_movq_wRn_M0(wrd);
2466 gen_op_iwmmxt_set_mup();
2467 gen_op_iwmmxt_set_cup();
2468 break;
2469 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2470 case 0x408: case 0x508: case 0x608: case 0x708:
2471 case 0x808: case 0x908: case 0xa08: case 0xb08:
2472 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2473 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2474 return 1;
2475 wrd = (insn >> 12) & 0xf;
2476 rd0 = (insn >> 16) & 0xf;
2477 rd1 = (insn >> 0) & 0xf;
2478 gen_op_iwmmxt_movq_M0_wRn(rd0);
2479 switch ((insn >> 22) & 3) {
2480 case 1:
2481 if (insn & (1 << 21))
2482 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2483 else
2484 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2485 break;
2486 case 2:
2487 if (insn & (1 << 21))
2488 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2489 else
2490 gen_op_iwmmxt_packul_M0_wRn(rd1);
2491 break;
2492 case 3:
2493 if (insn & (1 << 21))
2494 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2495 else
2496 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2497 break;
2498 }
2499 gen_op_iwmmxt_movq_wRn_M0(wrd);
2500 gen_op_iwmmxt_set_mup();
2501 gen_op_iwmmxt_set_cup();
2502 break;
2503 case 0x201: case 0x203: case 0x205: case 0x207:
2504 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2505 case 0x211: case 0x213: case 0x215: case 0x217:
2506 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2507 wrd = (insn >> 5) & 0xf;
2508 rd0 = (insn >> 12) & 0xf;
2509 rd1 = (insn >> 0) & 0xf;
2510 if (rd0 == 0xf || rd1 == 0xf)
2511 return 1;
2512 gen_op_iwmmxt_movq_M0_wRn(wrd);
2513 tmp = load_reg(s, rd0);
2514 tmp2 = load_reg(s, rd1);
2515 switch ((insn >> 16) & 0xf) {
2516 case 0x0: /* TMIA */
2517 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2518 break;
2519 case 0x8: /* TMIAPH */
2520 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2521 break;
2522 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
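/* TMIAxy: bits 16 and 17 select the bottom or top halfword of each source register before the signed 16x16 multiply-accumulate. */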
2523 if (insn & (1 << 16))
2524 tcg_gen_shri_i32(tmp, tmp, 16);
2525 if (insn & (1 << 17))
2526 tcg_gen_shri_i32(tmp2, tmp2, 16);
2527 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2528 break;
2529 default:
2530 tcg_temp_free_i32(tmp2);
2531 tcg_temp_free_i32(tmp);
2532 return 1;
2533 }
2534 tcg_temp_free_i32(tmp2);
2535 tcg_temp_free_i32(tmp);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 break;
2539 default:
2540 return 1;
2541 }
2542
2543 return 0;
2544 }
2545
2546 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2547 (i.e. an undefined instruction). */
2548 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2549 {
2550 int acc, rd0, rd1, rdhi, rdlo;
2551 TCGv_i32 tmp, tmp2;
2552
2553 if ((insn & 0x0ff00f10) == 0x0e200010) {
2554 /* Multiply with Internal Accumulate Format */
2555 rd0 = (insn >> 12) & 0xf;
2556 rd1 = insn & 0xf;
2557 acc = (insn >> 5) & 7;
2558
2559 if (acc != 0)
2560 return 1;
2561
2562 tmp = load_reg(s, rd0);
2563 tmp2 = load_reg(s, rd1);
2564 switch ((insn >> 16) & 0xf) {
2565 case 0x0: /* MIA */
2566 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2567 break;
2568 case 0x8: /* MIAPH */
2569 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2570 break;
2571 case 0xc: /* MIABB */
2572 case 0xd: /* MIABT */
2573 case 0xe: /* MIATB */
2574 case 0xf: /* MIATT */
2575 if (insn & (1 << 16))
2576 tcg_gen_shri_i32(tmp, tmp, 16);
2577 if (insn & (1 << 17))
2578 tcg_gen_shri_i32(tmp2, tmp2, 16);
2579 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2580 break;
2581 default:
2582 return 1;
2583 }
2584 tcg_temp_free_i32(tmp2);
2585 tcg_temp_free_i32(tmp);
2586
2587 gen_op_iwmmxt_movq_wRn_M0(acc);
2588 return 0;
2589 }
2590
2591 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2592 /* Internal Accumulator Access Format */
2593 rdhi = (insn >> 16) & 0xf;
2594 rdlo = (insn >> 12) & 0xf;
2595 acc = insn & 7;
2596
2597 if (acc != 0)
2598 return 1;
2599
2600 if (insn & ARM_CP_RW_BIT) { /* MRA */
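/* MRA: the XScale internal accumulator is 40 bits wide, so RdHi receives only bits [39:32]. */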
2601 iwmmxt_load_reg(cpu_V0, acc);
2602 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2603 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2604 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2605 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2606 } else { /* MAR */
2607 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2608 iwmmxt_store_reg(cpu_V0, acc);
2609 }
2610 return 0;
2611 }
2612
2613 return 1;
2614 }
2615
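/* Extract VFP register numbers from an instruction: a single precision register number is the 4-bit field with one extra bit from elsewhere in the encoding as its low bit; for a double precision register the extra bit becomes bit 4 of the number and may only be set with VFP3. */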
2616 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2617 #define VFP_SREG(insn, bigbit, smallbit) \
2618 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2619 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2620 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2621 reg = (((insn) >> (bigbit)) & 0x0f) \
2622 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2623 } else { \
2624 if (insn & (1 << (smallbit))) \
2625 return 1; \
2626 reg = ((insn) >> (bigbit)) & 0x0f; \
2627 }} while (0)
2628
2629 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2630 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2631 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2632 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2633 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2634 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2635
2636 /* Move between integer and VFP cores. */
2637 static TCGv_i32 gen_vfp_mrs(void)
2638 {
2639 TCGv_i32 tmp = tcg_temp_new_i32();
2640 tcg_gen_mov_i32(tmp, cpu_F0s);
2641 return tmp;
2642 }
2643
2644 static void gen_vfp_msr(TCGv_i32 tmp)
2645 {
2646 tcg_gen_mov_i32(cpu_F0s, tmp);
2647 tcg_temp_free_i32(tmp);
2648 }
2649
2650 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2651 {
2652 TCGv_i32 tmp = tcg_temp_new_i32();
2653 if (shift)
2654 tcg_gen_shri_i32(var, var, shift);
2655 tcg_gen_ext8u_i32(var, var);
2656 tcg_gen_shli_i32(tmp, var, 8);
2657 tcg_gen_or_i32(var, var, tmp);
2658 tcg_gen_shli_i32(tmp, var, 16);
2659 tcg_gen_or_i32(var, var, tmp);
2660 tcg_temp_free_i32(tmp);
2661 }
2662
2663 static void gen_neon_dup_low16(TCGv_i32 var)
2664 {
2665 TCGv_i32 tmp = tcg_temp_new_i32();
2666 tcg_gen_ext16u_i32(var, var);
2667 tcg_gen_shli_i32(tmp, var, 16);
2668 tcg_gen_or_i32(var, var, tmp);
2669 tcg_temp_free_i32(tmp);
2670 }
2671
2672 static void gen_neon_dup_high16(TCGv_i32 var)
2673 {
2674 TCGv_i32 tmp = tcg_temp_new_i32();
2675 tcg_gen_andi_i32(var, var, 0xffff0000);
2676 tcg_gen_shri_i32(tmp, var, 16);
2677 tcg_gen_or_i32(var, var, tmp);
2678 tcg_temp_free_i32(tmp);
2679 }
2680
2681 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2682 {
2683 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2684 TCGv_i32 tmp = tcg_temp_new_i32();
2685 switch (size) {
2686 case 0:
2687 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2688 gen_neon_dup_u8(tmp, 0);
2689 break;
2690 case 1:
2691 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2692 gen_neon_dup_low16(tmp);
2693 break;
2694 case 2:
2695 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2696 break;
2697 default: /* Avoid compiler warnings. */
2698 abort();
2699 }
2700 return tmp;
2701 }
2702
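/* VSEL: select between Vn and Vm according to one of four condition codes (eq, vs, ge, gt), implemented with movcond on the cached NZCV flag values. */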
2703 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2704 uint32_t dp)
2705 {
2706 uint32_t cc = extract32(insn, 20, 2);
2707
2708 if (dp) {
2709 TCGv_i64 frn, frm, dest;
2710 TCGv_i64 tmp, zero, zf, nf, vf;
2711
2712 zero = tcg_const_i64(0);
2713
2714 frn = tcg_temp_new_i64();
2715 frm = tcg_temp_new_i64();
2716 dest = tcg_temp_new_i64();
2717
2718 zf = tcg_temp_new_i64();
2719 nf = tcg_temp_new_i64();
2720 vf = tcg_temp_new_i64();
2721
2722 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2723 tcg_gen_ext_i32_i64(nf, cpu_NF);
2724 tcg_gen_ext_i32_i64(vf, cpu_VF);
2725
2726 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2727 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2728 switch (cc) {
2729 case 0: /* eq: Z */
2730 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2731 frn, frm);
2732 break;
2733 case 1: /* vs: V */
2734 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2735 frn, frm);
2736 break;
2737 case 2: /* ge: N == V -> N ^ V == 0 */
2738 tmp = tcg_temp_new_i64();
2739 tcg_gen_xor_i64(tmp, vf, nf);
2740 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2741 frn, frm);
2742 tcg_temp_free_i64(tmp);
2743 break;
2744 case 3: /* gt: !Z && N == V */
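/* gt is built from two movconds: take frn when Z is clear, then fall back to frm when N != V. */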
2745 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2746 frn, frm);
2747 tmp = tcg_temp_new_i64();
2748 tcg_gen_xor_i64(tmp, vf, nf);
2749 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2750 dest, frm);
2751 tcg_temp_free_i64(tmp);
2752 break;
2753 }
2754 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2755 tcg_temp_free_i64(frn);
2756 tcg_temp_free_i64(frm);
2757 tcg_temp_free_i64(dest);
2758
2759 tcg_temp_free_i64(zf);
2760 tcg_temp_free_i64(nf);
2761 tcg_temp_free_i64(vf);
2762
2763 tcg_temp_free_i64(zero);
2764 } else {
2765 TCGv_i32 frn, frm, dest;
2766 TCGv_i32 tmp, zero;
2767
2768 zero = tcg_const_i32(0);
2769
2770 frn = tcg_temp_new_i32();
2771 frm = tcg_temp_new_i32();
2772 dest = tcg_temp_new_i32();
2773 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2774 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2775 switch (cc) {
2776 case 0: /* eq: Z */
2777 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2778 frn, frm);
2779 break;
2780 case 1: /* vs: V */
2781 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2782 frn, frm);
2783 break;
2784 case 2: /* ge: N == V -> N ^ V == 0 */
2785 tmp = tcg_temp_new_i32();
2786 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2787 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2788 frn, frm);
2789 tcg_temp_free_i32(tmp);
2790 break;
2791 case 3: /* gt: !Z && N == V */
2792 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2793 frn, frm);
2794 tmp = tcg_temp_new_i32();
2795 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2796 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2797 dest, frm);
2798 tcg_temp_free_i32(tmp);
2799 break;
2800 }
2801 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2802 tcg_temp_free_i32(frn);
2803 tcg_temp_free_i32(frm);
2804 tcg_temp_free_i32(dest);
2805
2806 tcg_temp_free_i32(zero);
2807 }
2808
2809 return 0;
2810 }
2811
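/* VMINNM/VMAXNM: bit 6 selects min or max; the helpers implement the IEEE 754-2008 minNum/maxNum semantics. */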
2812 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2813 uint32_t rm, uint32_t dp)
2814 {
2815 uint32_t vmin = extract32(insn, 6, 1);
2816 TCGv_ptr fpst = get_fpstatus_ptr(0);
2817
2818 if (dp) {
2819 TCGv_i64 frn, frm, dest;
2820
2821 frn = tcg_temp_new_i64();
2822 frm = tcg_temp_new_i64();
2823 dest = tcg_temp_new_i64();
2824
2825 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2826 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2827 if (vmin) {
2828 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2829 } else {
2830 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2831 }
2832 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2833 tcg_temp_free_i64(frn);
2834 tcg_temp_free_i64(frm);
2835 tcg_temp_free_i64(dest);
2836 } else {
2837 TCGv_i32 frn, frm, dest;
2838
2839 frn = tcg_temp_new_i32();
2840 frm = tcg_temp_new_i32();
2841 dest = tcg_temp_new_i32();
2842
2843 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2844 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2845 if (vmin) {
2846 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2847 } else {
2848 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2849 }
2850 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2851 tcg_temp_free_i32(frn);
2852 tcg_temp_free_i32(frm);
2853 tcg_temp_free_i32(dest);
2854 }
2855
2856 tcg_temp_free_ptr(fpst);
2857 return 0;
2858 }
2859
2860 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2861 int rounding)
2862 {
2863 TCGv_ptr fpst = get_fpstatus_ptr(0);
2864 TCGv_i32 tcg_rmode;
2865
2866 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2867 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
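/* set_rmode returns the previous FP rounding mode in tcg_rmode, so the second call below restores it after the round-to-integral operation. */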
2868
2869 if (dp) {
2870 TCGv_i64 tcg_op;
2871 TCGv_i64 tcg_res;
2872 tcg_op = tcg_temp_new_i64();
2873 tcg_res = tcg_temp_new_i64();
2874 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2875 gen_helper_rintd(tcg_res, tcg_op, fpst);
2876 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2877 tcg_temp_free_i64(tcg_op);
2878 tcg_temp_free_i64(tcg_res);
2879 } else {
2880 TCGv_i32 tcg_op;
2881 TCGv_i32 tcg_res;
2882 tcg_op = tcg_temp_new_i32();
2883 tcg_res = tcg_temp_new_i32();
2884 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2885 gen_helper_rints(tcg_res, tcg_op, fpst);
2886 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2887 tcg_temp_free_i32(tcg_op);
2888 tcg_temp_free_i32(tcg_res);
2889 }
2890
2891 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2892 tcg_temp_free_i32(tcg_rmode);
2893
2894 tcg_temp_free_ptr(fpst);
2895 return 0;
2896 }
2897
2898 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2899 int rounding)
2900 {
2901 bool is_signed = extract32(insn, 7, 1);
2902 TCGv_ptr fpst = get_fpstatus_ptr(0);
2903 TCGv_i32 tcg_rmode, tcg_shift;
2904
2905 tcg_shift = tcg_const_i32(0);
2906
2907 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2908 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2909
2910 if (dp) {
2911 TCGv_i64 tcg_double, tcg_res;
2912 TCGv_i32 tcg_tmp;
2913 /* Rd is encoded as a single precision register even when the source
2914 * is double precision.
2915 */
2916 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2917 tcg_double = tcg_temp_new_i64();
2918 tcg_res = tcg_temp_new_i64();
2919 tcg_tmp = tcg_temp_new_i32();
2920 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2921 if (is_signed) {
2922 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2923 } else {
2924 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2925 }
2926 tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
2927 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2928 tcg_temp_free_i32(tcg_tmp);
2929 tcg_temp_free_i64(tcg_res);
2930 tcg_temp_free_i64(tcg_double);
2931 } else {
2932 TCGv_i32 tcg_single, tcg_res;
2933 tcg_single = tcg_temp_new_i32();
2934 tcg_res = tcg_temp_new_i32();
2935 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2936 if (is_signed) {
2937 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2938 } else {
2939 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
2940 }
2941 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
2942 tcg_temp_free_i32(tcg_res);
2943 tcg_temp_free_i32(tcg_single);
2944 }
2945
2946 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2947 tcg_temp_free_i32(tcg_rmode);
2948
2949 tcg_temp_free_i32(tcg_shift);
2950
2951 tcg_temp_free_ptr(fpst);
2952
2953 return 0;
2954 }
2955
2956 /* Table for converting the most common AArch32 encoding of
2957 * rounding mode to arm_fprounding order (which matches the
2958 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
2959 */
2960 static const uint8_t fp_decode_rm[] = {
2961 FPROUNDING_TIEAWAY,
2962 FPROUNDING_TIEEVEN,
2963 FPROUNDING_POSINF,
2964 FPROUNDING_NEGINF,
2965 };
2966
2967 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
2968 {
2969 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2970
2971 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
2972 return 1;
2973 }
2974
2975 if (dp) {
2976 VFP_DREG_D(rd, insn);
2977 VFP_DREG_N(rn, insn);
2978 VFP_DREG_M(rm, insn);
2979 } else {
2980 rd = VFP_SREG_D(insn);
2981 rn = VFP_SREG_N(insn);
2982 rm = VFP_SREG_M(insn);
2983 }
2984
2985 if ((insn & 0x0f800e50) == 0x0e000a00) {
2986 return handle_vsel(insn, rd, rn, rm, dp);
2987 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2988 return handle_vminmaxnm(insn, rd, rn, rm, dp);
2989 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2990 /* VRINTA, VRINTN, VRINTP, VRINTM */
2991 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2992 return handle_vrint(insn, rd, rm, dp, rounding);
2993 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2994 /* VCVTA, VCVTN, VCVTP, VCVTM */
2995 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2996 return handle_vcvt(insn, rd, rm, dp, rounding);
2997 }
2998 return 1;
2999 }
3000
3001 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3002 (i.e. an undefined instruction). */
3003 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3004 {
3005 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3006 int dp, veclen;
3007 TCGv_i32 addr;
3008 TCGv_i32 tmp;
3009 TCGv_i32 tmp2;
3010
3011 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3012 return 1;
3013 }
3014
3015 /* FIXME: this access check should not take precedence over UNDEF
3016 * for invalid encodings; we will generate incorrect syndrome information
3017 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3018 */
3019 if (!s->cpacr_fpen) {
3020 gen_exception_insn(s, 4, EXCP_UDEF,
3021 syn_fp_access_trap(1, 0xe, s->thumb));
3022 return 0;
3023 }
3024
3025 if (!s->vfp_enabled) {
3026 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3027 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3028 return 1;
3029 rn = (insn >> 16) & 0xf;
3030 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3031 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3032 return 1;
3033 }
3034 }
3035
3036 if (extract32(insn, 28, 4) == 0xf) {
3037 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3038 * only used in v8 and above.
3039 */
3040 return disas_vfp_v8_insn(s, insn);
3041 }
3042
3043 dp = ((insn & 0xf00) == 0xb00);
3044 switch ((insn >> 24) & 0xf) {
3045 case 0xe:
3046 if (insn & (1 << 4)) {
3047 /* single register transfer */
3048 rd = (insn >> 12) & 0xf;
3049 if (dp) {
3050 int size;
3051 int pass;
3052
3053 VFP_DREG_N(rn, insn);
3054 if (insn & 0xf)
3055 return 1;
3056 if (insn & 0x00c00060
3057 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3058 return 1;
3059 }
3060
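/* Transfer between a core register and one element of a D register (VMOV scalar / VDUP): bit 21 selects which word of the D register, bit 22 selects byte elements, bit 5 halfword elements (offset from bits [6:5] or bit 6), otherwise a whole word. */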
3061 pass = (insn >> 21) & 1;
3062 if (insn & (1 << 22)) {
3063 size = 0;
3064 offset = ((insn >> 5) & 3) * 8;
3065 } else if (insn & (1 << 5)) {
3066 size = 1;
3067 offset = (insn & (1 << 6)) ? 16 : 0;
3068 } else {
3069 size = 2;
3070 offset = 0;
3071 }
3072 if (insn & ARM_CP_RW_BIT) {
3073 /* vfp->arm */
3074 tmp = neon_load_reg(rn, pass);
3075 switch (size) {
3076 case 0:
3077 if (offset)
3078 tcg_gen_shri_i32(tmp, tmp, offset);
3079 if (insn & (1 << 23))
3080 gen_uxtb(tmp);
3081 else
3082 gen_sxtb(tmp);
3083 break;
3084 case 1:
3085 if (insn & (1 << 23)) {
3086 if (offset) {
3087 tcg_gen_shri_i32(tmp, tmp, 16);
3088 } else {
3089 gen_uxth(tmp);
3090 }
3091 } else {
3092 if (offset) {
3093 tcg_gen_sari_i32(tmp, tmp, 16);
3094 } else {
3095 gen_sxth(tmp);
3096 }
3097 }
3098 break;
3099 case 2:
3100 break;
3101 }
3102 store_reg(s, rd, tmp);
3103 } else {
3104 /* arm->vfp */
3105 tmp = load_reg(s, rd);
3106 if (insn & (1 << 23)) {
3107 /* VDUP */
3108 if (size == 0) {
3109 gen_neon_dup_u8(tmp, 0);
3110 } else if (size == 1) {
3111 gen_neon_dup_low16(tmp);
3112 }
3113 for (n = 0; n <= pass * 2; n++) {
3114 tmp2 = tcg_temp_new_i32();
3115 tcg_gen_mov_i32(tmp2, tmp);
3116 neon_store_reg(rn, n, tmp2);
3117 }
3118 neon_store_reg(rn, n, tmp);
3119 } else {
3120 /* VMOV */
3121 switch (size) {
3122 case 0:
3123 tmp2 = neon_load_reg(rn, pass);
3124 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3125 tcg_temp_free_i32(tmp2);
3126 break;
3127 case 1:
3128 tmp2 = neon_load_reg(rn, pass);
3129 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3130 tcg_temp_free_i32(tmp2);
3131 break;
3132 case 2:
3133 break;
3134 }
3135 neon_store_reg(rn, pass, tmp);
3136 }
3137 }
3138 } else { /* !dp */
3139 if ((insn & 0x6f) != 0x00)
3140 return 1;
3141 rn = VFP_SREG_N(insn);
3142 if (insn & ARM_CP_RW_BIT) {
3143 /* vfp->arm */
3144 if (insn & (1 << 21)) {
3145 /* system register */
3146 rn >>= 1;
3147
3148 switch (rn) {
3149 case ARM_VFP_FPSID:
3150 /* VFP2 allows access to FSID from userspace.
3151 VFP3 restricts all id registers to privileged
3152