target-sparc: Implement BMASK/BSHUFFLE.
[qemu.git] / target-sparc / translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only one of the two
39 values in jump_pc[], selected by a run-time condition */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst, cpu_addr, cpu_val;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 static TCGv_i32 cpu_tmp32;
64 static TCGv_i64 cpu_tmp64;
65 /* Floating point registers */
66 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67
68 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
69 static target_ulong gen_opc_jump_pc[2];
70
71 #include "gen-icount.h"
72
73 typedef struct DisasContext {
74 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
77 int is_br;
78 int mem_idx;
79 int fpu_enabled;
80 int address_mask_32bit;
81 int singlestep;
82 uint32_t cc_op; /* current CC operation */
83 struct TranslationBlock *tb;
84 sparc_def_t *def;
85 TCGv_i32 t32[3];
86 int n_t32;
87 } DisasContext;
88
89 // This macro uses non-native bit order: bit 0 is the MSB (bit 31)
90 #define GET_FIELD(X, FROM, TO) \
91 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
92
93 // This macro uses the bit order of the manuals, i.e. bit 0 is 2^0 (the LSB)
94 #define GET_FIELD_SP(X, FROM, TO) \
95 GET_FIELD(X, 31 - (TO), 31 - (FROM))
96
97 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
98 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
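/* Informative example (not in the original): GET_FIELD(insn, 3, 6)
   shifts right by 31 - 6 = 25 and masks four bits, extracting native
   bits 28..25; GET_FIELD_SP(insn, 25, 28) expands to exactly the same
   GET_FIELD(insn, 3, 6). This is the cond field of a Bicc
   instruction, cf. do_branch() below. */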
99
100 #ifdef TARGET_SPARC64
101 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
102 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
103 #else
104 #define DFPREG(r) (r & 0x1e)
105 #define QFPREG(r) (r & 0x1c)
106 #endif
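
/* Informative example: on SPARC64 bit 0 of the 5-bit register field
   supplies bit 5 of the double/quad register number, so %d32 is
   encoded as rd = 1 and DFPREG(1) = ((1 & 1) << 5) | (1 & 0x1e) = 32. */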
107
108 #define UA2005_HTRAP_MASK 0xff
109 #define V8_TRAP_MASK 0x7f
110
111 static int sign_extend(int x, int len)
112 {
113 len = 32 - len;
114 return (x << len) >> len;
115 }
116
117 #define IS_IMM (insn & (1<<13))
118
119 static inline void gen_update_fprs_dirty(int rd)
120 {
121 #if defined(TARGET_SPARC64)
122 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
123 #endif
124 }
125
126 /* floating-point register moves */
127 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
128 {
129 #if TCG_TARGET_REG_BITS == 32
130 if (src & 1) {
131 return TCGV_LOW(cpu_fpr[src / 2]);
132 } else {
133 return TCGV_HIGH(cpu_fpr[src / 2]);
134 }
135 #else
136 if (src & 1) {
137 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
138 } else {
139 TCGv_i32 ret = tcg_temp_local_new_i32();
140 TCGv_i64 t = tcg_temp_new_i64();
141
142 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
143 tcg_gen_trunc_i64_i32(ret, t);
144 tcg_temp_free_i64(t);
145
146 dc->t32[dc->n_t32++] = ret;
147 assert(dc->n_t32 <= ARRAY_SIZE(dc->t32));
148
149 return ret;
150 }
151 #endif
152 }
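
/* Each 64-bit cpu_fpr[] element packs an even/odd pair of 32-bit
   registers: the even register sits in the high half and the odd one
   in the low half, matching the big-endian layout of an architected
   register pair. The accessors above and below rely on this. */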
153
154 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
155 {
156 #if TCG_TARGET_REG_BITS == 32
157 if (dst & 1) {
158 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
159 } else {
160 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
161 }
162 #else
163 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
164 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
165 (dst & 1 ? 0 : 32), 32);
166 #endif
167 gen_update_fprs_dirty(dst);
168 }
169
170 static TCGv_i32 gen_dest_fpr_F(void)
171 {
172 return cpu_tmp32;
173 }
174
175 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
176 {
177 src = DFPREG(src);
178 return cpu_fpr[src / 2];
179 }
180
181 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
182 {
183 dst = DFPREG(dst);
184 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
185 gen_update_fprs_dirty(dst);
186 }
187
188 static TCGv_i64 gen_dest_fpr_D(void)
189 {
190 return cpu_tmp64;
191 }
192
193 static void gen_op_load_fpr_QT0(unsigned int src)
194 {
195 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
196 offsetof(CPU_QuadU, ll.upper));
197 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
198 offsetof(CPU_QuadU, ll.lower));
199 }
200
201 static void gen_op_load_fpr_QT1(unsigned int src)
202 {
203 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
204 offsetof(CPU_QuadU, ll.upper));
205 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
206 offsetof(CPU_QuadU, ll.lower));
207 }
208
209 static void gen_op_store_QT0_fpr(unsigned int dst)
210 {
211 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
212 offsetof(CPU_QuadU, ll.upper));
213 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
214 offsetof(CPU_QuadU, ll.lower));
215 }
216
217 #ifdef TARGET_SPARC64
218 static void gen_move_Q(unsigned int rd, unsigned int rs)
219 {
220 rd = QFPREG(rd);
221 rs = QFPREG(rs);
222
223 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
224 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
225 gen_update_fprs_dirty(rd);
226 }
227 #endif
228
229 /* moves */
230 #ifdef CONFIG_USER_ONLY
231 #define supervisor(dc) 0
232 #ifdef TARGET_SPARC64
233 #define hypervisor(dc) 0
234 #endif
235 #else
236 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
237 #ifdef TARGET_SPARC64
238 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
239 #else
240 #endif
241 #endif
242
243 #ifdef TARGET_SPARC64
244 #ifndef TARGET_ABI32
245 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
246 #else
247 #define AM_CHECK(dc) (1)
248 #endif
249 #endif
250
251 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
252 {
253 #ifdef TARGET_SPARC64
254 if (AM_CHECK(dc))
255 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
256 #endif
257 }
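
/* Informative note: AM_CHECK reflects PSTATE.AM (or is forced for the
   32-bit ABI); when set, SPARC64 truncates effective addresses to the
   low 32 bits, which is what the mask above implements. */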
258
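/* %g0 always reads as zero and ignores writes; %g1..%g7 live in TCG
   globals, while the windowed registers (%o, %l, %i) are reached
   through cpu_regwptr, which follows the current register window. */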
259 static inline void gen_movl_reg_TN(int reg, TCGv tn)
260 {
261 if (reg == 0)
262 tcg_gen_movi_tl(tn, 0);
263 else if (reg < 8)
264 tcg_gen_mov_tl(tn, cpu_gregs[reg]);
265 else {
266 tcg_gen_ld_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
267 }
268 }
269
270 static inline void gen_movl_TN_reg(int reg, TCGv tn)
271 {
272 if (reg == 0)
273 return;
274 else if (reg < 8)
275 tcg_gen_mov_tl(cpu_gregs[reg], tn);
276 else {
277 tcg_gen_st_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
278 }
279 }
280
281 static inline void gen_goto_tb(DisasContext *s, int tb_num,
282 target_ulong pc, target_ulong npc)
283 {
284 TranslationBlock *tb;
285
286 tb = s->tb;
287 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
288 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
289 !s->singlestep) {
290 /* jump to same page: we can use a direct jump */
291 tcg_gen_goto_tb(tb_num);
292 tcg_gen_movi_tl(cpu_pc, pc);
293 tcg_gen_movi_tl(cpu_npc, npc);
294 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
295 } else {
296 /* jump to another page: currently not optimized */
297 tcg_gen_movi_tl(cpu_pc, pc);
298 tcg_gen_movi_tl(cpu_npc, npc);
299 tcg_gen_exit_tb(0);
300 }
301 }
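
/* Informative note: direct TB chaining is only safe when both pc and
   npc stay on the translation block's own page, so that page-level
   invalidation still catches rewritten code; cross-page (and
   single-stepped) jumps exit to the main loop for a fresh lookup. */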
302
303 // XXX suboptimal
304 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
305 {
306 tcg_gen_extu_i32_tl(reg, src);
307 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
308 tcg_gen_andi_tl(reg, reg, 0x1);
309 }
310
311 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
312 {
313 tcg_gen_extu_i32_tl(reg, src);
314 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
315 tcg_gen_andi_tl(reg, reg, 0x1);
316 }
317
318 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
319 {
320 tcg_gen_extu_i32_tl(reg, src);
321 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
322 tcg_gen_andi_tl(reg, reg, 0x1);
323 }
324
325 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
326 {
327 tcg_gen_extu_i32_tl(reg, src);
328 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
329 tcg_gen_andi_tl(reg, reg, 0x1);
330 }
331
332 static inline void gen_add_tv(TCGv dst, TCGv src1, TCGv src2)
333 {
334 TCGv r_temp;
335 TCGv_i32 r_const;
336 int l1;
337
338 l1 = gen_new_label();
339
340 r_temp = tcg_temp_new();
341 tcg_gen_xor_tl(r_temp, src1, src2);
342 tcg_gen_not_tl(r_temp, r_temp);
343 tcg_gen_xor_tl(cpu_tmp0, src1, dst);
344 tcg_gen_and_tl(r_temp, r_temp, cpu_tmp0);
345 tcg_gen_andi_tl(r_temp, r_temp, (1ULL << 31));
346 tcg_gen_brcondi_tl(TCG_COND_EQ, r_temp, 0, l1);
347 r_const = tcg_const_i32(TT_TOVF);
348 gen_helper_raise_exception(cpu_env, r_const);
349 tcg_temp_free_i32(r_const);
350 gen_set_label(l1);
351 tcg_temp_free(r_temp);
352 }
353
354 static inline void gen_tag_tv(TCGv src1, TCGv src2)
355 {
356 int l1;
357 TCGv_i32 r_const;
358
359 l1 = gen_new_label();
360 tcg_gen_or_tl(cpu_tmp0, src1, src2);
361 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x3);
362 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
363 r_const = tcg_const_i32(TT_TOVF);
364 gen_helper_raise_exception(cpu_env, r_const);
365 tcg_temp_free_i32(r_const);
366 gen_set_label(l1);
367 }
368
369 static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
370 {
371 tcg_gen_mov_tl(cpu_cc_src, src1);
372 tcg_gen_movi_tl(cpu_cc_src2, src2);
373 tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
374 tcg_gen_mov_tl(dst, cpu_cc_dst);
375 }
376
377 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
378 {
379 tcg_gen_mov_tl(cpu_cc_src, src1);
380 tcg_gen_mov_tl(cpu_cc_src2, src2);
381 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
382 tcg_gen_mov_tl(dst, cpu_cc_dst);
383 }
384
385 static TCGv_i32 gen_add32_carry32(void)
386 {
387 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
388
389 /* Carry is computed from a previous add: (dst < src) */
390 #if TARGET_LONG_BITS == 64
391 cc_src1_32 = tcg_temp_new_i32();
392 cc_src2_32 = tcg_temp_new_i32();
393 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
394 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
395 #else
396 cc_src1_32 = cpu_cc_dst;
397 cc_src2_32 = cpu_cc_src;
398 #endif
399
400 carry_32 = tcg_temp_new_i32();
401 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
402
403 #if TARGET_LONG_BITS == 64
404 tcg_temp_free_i32(cc_src1_32);
405 tcg_temp_free_i32(cc_src2_32);
406 #endif
407
408 return carry_32;
409 }
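
/* Informative note: the setcond above relies on 32-bit modular
   arithmetic. For dst = src1 + src2 (mod 2^32), a carry out of
   bit 31 occurred iff dst < src1 (unsigned), so one comparison
   recovers the flag without widening the addition. */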
410
411 static TCGv_i32 gen_sub32_carry32(void)
412 {
413 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
414
415 /* Carry is computed from a previous borrow: (src1 < src2) */
416 #if TARGET_LONG_BITS == 64
417 cc_src1_32 = tcg_temp_new_i32();
418 cc_src2_32 = tcg_temp_new_i32();
419 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
420 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
421 #else
422 cc_src1_32 = cpu_cc_src;
423 cc_src2_32 = cpu_cc_src2;
424 #endif
425
426 carry_32 = tcg_temp_new_i32();
427 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
428
429 #if TARGET_LONG_BITS == 64
430 tcg_temp_free_i32(cc_src1_32);
431 tcg_temp_free_i32(cc_src2_32);
432 #endif
433
434 return carry_32;
435 }
436
437 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
438 TCGv src2, int update_cc)
439 {
440 TCGv_i32 carry_32;
441 TCGv carry;
442
443 switch (dc->cc_op) {
444 case CC_OP_DIV:
445 case CC_OP_LOGIC:
446 /* Carry is known to be zero. Fall back to plain ADD. */
447 if (update_cc) {
448 gen_op_add_cc(dst, src1, src2);
449 } else {
450 tcg_gen_add_tl(dst, src1, src2);
451 }
452 return;
453
454 case CC_OP_ADD:
455 case CC_OP_TADD:
456 case CC_OP_TADDTV:
457 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
458 {
459 /* For 32-bit hosts, we can re-use the host's hardware carry
460 generation by using an ADD2 opcode. We discard the low
461 part of the output. Ideally we'd combine this operation
462 with the add that generated the carry in the first place. */
463 TCGv dst_low = tcg_temp_new();
464 tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
465 cpu_cc_src, src1, cpu_cc_src2, src2);
466 tcg_temp_free(dst_low);
467 goto add_done;
468 }
469 #endif
470 carry_32 = gen_add32_carry32();
471 break;
472
473 case CC_OP_SUB:
474 case CC_OP_TSUB:
475 case CC_OP_TSUBTV:
476 carry_32 = gen_sub32_carry32();
477 break;
478
479 default:
480 /* We need external help to produce the carry. */
481 carry_32 = tcg_temp_new_i32();
482 gen_helper_compute_C_icc(carry_32, cpu_env);
483 break;
484 }
485
486 #if TARGET_LONG_BITS == 64
487 carry = tcg_temp_new();
488 tcg_gen_extu_i32_i64(carry, carry_32);
489 #else
490 carry = carry_32;
491 #endif
492
493 tcg_gen_add_tl(dst, src1, src2);
494 tcg_gen_add_tl(dst, dst, carry);
495
496 tcg_temp_free_i32(carry_32);
497 #if TARGET_LONG_BITS == 64
498 tcg_temp_free(carry);
499 #endif
500
501 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
502 add_done:
503 #endif
504 if (update_cc) {
505 tcg_gen_mov_tl(cpu_cc_src, src1);
506 tcg_gen_mov_tl(cpu_cc_src2, src2);
507 tcg_gen_mov_tl(cpu_cc_dst, dst);
508 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
509 dc->cc_op = CC_OP_ADDX;
510 }
511 }
512
513 static inline void gen_op_tadd_cc(TCGv dst, TCGv src1, TCGv src2)
514 {
515 tcg_gen_mov_tl(cpu_cc_src, src1);
516 tcg_gen_mov_tl(cpu_cc_src2, src2);
517 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
518 tcg_gen_mov_tl(dst, cpu_cc_dst);
519 }
520
521 static inline void gen_op_tadd_ccTV(TCGv dst, TCGv src1, TCGv src2)
522 {
523 tcg_gen_mov_tl(cpu_cc_src, src1);
524 tcg_gen_mov_tl(cpu_cc_src2, src2);
525 gen_tag_tv(cpu_cc_src, cpu_cc_src2);
526 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
527 gen_add_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
528 tcg_gen_mov_tl(dst, cpu_cc_dst);
529 }
530
531 static inline void gen_sub_tv(TCGv dst, TCGv src1, TCGv src2)
532 {
533 TCGv r_temp;
534 TCGv_i32 r_const;
535 int l1;
536
537 l1 = gen_new_label();
538
539 r_temp = tcg_temp_new();
540 tcg_gen_xor_tl(r_temp, src1, src2);
541 tcg_gen_xor_tl(cpu_tmp0, src1, dst);
542 tcg_gen_and_tl(r_temp, r_temp, cpu_tmp0);
543 tcg_gen_andi_tl(r_temp, r_temp, (1ULL << 31));
544 tcg_gen_brcondi_tl(TCG_COND_EQ, r_temp, 0, l1);
545 r_const = tcg_const_i32(TT_TOVF);
546 gen_helper_raise_exception(cpu_env, r_const);
547 tcg_temp_free_i32(r_const);
548 gen_set_label(l1);
549 tcg_temp_free(r_temp);
550 }
551
552 static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
553 {
554 tcg_gen_mov_tl(cpu_cc_src, src1);
555 tcg_gen_movi_tl(cpu_cc_src2, src2);
556 if (src2 == 0) {
557 tcg_gen_mov_tl(cpu_cc_dst, src1);
558 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
559 dc->cc_op = CC_OP_LOGIC;
560 } else {
561 tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
562 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
563 dc->cc_op = CC_OP_SUB;
564 }
565 tcg_gen_mov_tl(dst, cpu_cc_dst);
566 }
567
568 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
569 {
570 tcg_gen_mov_tl(cpu_cc_src, src1);
571 tcg_gen_mov_tl(cpu_cc_src2, src2);
572 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
573 tcg_gen_mov_tl(dst, cpu_cc_dst);
574 }
575
576 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
577 TCGv src2, int update_cc)
578 {
579 TCGv_i32 carry_32;
580 TCGv carry;
581
582 switch (dc->cc_op) {
583 case CC_OP_DIV:
584 case CC_OP_LOGIC:
585 /* Carry is known to be zero. Fall back to plain SUB. */
586 if (update_cc) {
587 gen_op_sub_cc(dst, src1, src2);
588 } else {
589 tcg_gen_sub_tl(dst, src1, src2);
590 }
591 return;
592
593 case CC_OP_ADD:
594 case CC_OP_TADD:
595 case CC_OP_TADDTV:
596 carry_32 = gen_add32_carry32();
597 break;
598
599 case CC_OP_SUB:
600 case CC_OP_TSUB:
601 case CC_OP_TSUBTV:
602 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
603 {
604 /* For 32-bit hosts, we can re-use the host's hardware carry
605 generation by using a SUB2 opcode. We discard the low
606 part of the output. Ideally we'd combine this operation
607 with the sub that generated the borrow in the first place. */
608 TCGv dst_low = tcg_temp_new();
609 tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
610 cpu_cc_src, src1, cpu_cc_src2, src2);
611 tcg_temp_free(dst_low);
612 goto sub_done;
613 }
614 #endif
615 carry_32 = gen_sub32_carry32();
616 break;
617
618 default:
619 /* We need external help to produce the carry. */
620 carry_32 = tcg_temp_new_i32();
621 gen_helper_compute_C_icc(carry_32, cpu_env);
622 break;
623 }
624
625 #if TARGET_LONG_BITS == 64
626 carry = tcg_temp_new();
627 tcg_gen_extu_i32_i64(carry, carry_32);
628 #else
629 carry = carry_32;
630 #endif
631
632 tcg_gen_sub_tl(dst, src1, src2);
633 tcg_gen_sub_tl(dst, dst, carry);
634
635 tcg_temp_free_i32(carry_32);
636 #if TARGET_LONG_BITS == 64
637 tcg_temp_free(carry);
638 #endif
639
640 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
641 sub_done:
642 #endif
643 if (update_cc) {
644 tcg_gen_mov_tl(cpu_cc_src, src1);
645 tcg_gen_mov_tl(cpu_cc_src2, src2);
646 tcg_gen_mov_tl(cpu_cc_dst, dst);
647 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
648 dc->cc_op = CC_OP_SUBX;
649 }
650 }
651
652 static inline void gen_op_tsub_cc(TCGv dst, TCGv src1, TCGv src2)
653 {
654 tcg_gen_mov_tl(cpu_cc_src, src1);
655 tcg_gen_mov_tl(cpu_cc_src2, src2);
656 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
657 tcg_gen_mov_tl(dst, cpu_cc_dst);
658 }
659
660 static inline void gen_op_tsub_ccTV(TCGv dst, TCGv src1, TCGv src2)
661 {
662 tcg_gen_mov_tl(cpu_cc_src, src1);
663 tcg_gen_mov_tl(cpu_cc_src2, src2);
664 gen_tag_tv(cpu_cc_src, cpu_cc_src2);
665 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
666 gen_sub_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
667 tcg_gen_mov_tl(dst, cpu_cc_dst);
668 }
669
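/* Informative overview: MULScc performs one step of the V8 integer
   multiply. The low bit of %y decides whether rs2 joins the sum,
   rs1 is shifted right with (N ^ V) entering bit 31, and %y shifts
   right with rs1's old low bit entering its bit 31; 32 repetitions
   produce the full 64-bit product across %y and the result register. */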
670 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
671 {
672 TCGv r_temp;
673 int l1;
674
675 l1 = gen_new_label();
676 r_temp = tcg_temp_new();
677
678 /* old op:
679 if (!(env->y & 1))
680 T1 = 0;
681 */
682 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
683 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
684 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
685 tcg_gen_brcondi_tl(TCG_COND_NE, r_temp, 0, l1);
686 tcg_gen_movi_tl(cpu_cc_src2, 0);
687 gen_set_label(l1);
688
689 // b2 = T0 & 1;
690 // env->y = (b2 << 31) | (env->y >> 1);
691 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
692 tcg_gen_shli_tl(r_temp, r_temp, 31);
693 tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
694 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
695 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
696 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
697
698 // b1 = N ^ V;
699 gen_mov_reg_N(cpu_tmp0, cpu_psr);
700 gen_mov_reg_V(r_temp, cpu_psr);
701 tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
702 tcg_temp_free(r_temp);
703
704 // T0 = (b1 << 31) | (T0 >> 1);
705 // src1 = T0;
706 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
707 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
708 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
709
710 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
711
712 tcg_gen_mov_tl(dst, cpu_cc_dst);
713 }
714
715 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
716 {
717 TCGv_i32 r_src1, r_src2;
718 TCGv_i64 r_temp, r_temp2;
719
720 r_src1 = tcg_temp_new_i32();
721 r_src2 = tcg_temp_new_i32();
722
723 tcg_gen_trunc_tl_i32(r_src1, src1);
724 tcg_gen_trunc_tl_i32(r_src2, src2);
725
726 r_temp = tcg_temp_new_i64();
727 r_temp2 = tcg_temp_new_i64();
728
729 if (sign_ext) {
730 tcg_gen_ext_i32_i64(r_temp, r_src2);
731 tcg_gen_ext_i32_i64(r_temp2, r_src1);
732 } else {
733 tcg_gen_extu_i32_i64(r_temp, r_src2);
734 tcg_gen_extu_i32_i64(r_temp2, r_src1);
735 }
736
737 tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
738
739 tcg_gen_shri_i64(r_temp, r_temp2, 32);
740 tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
741 tcg_temp_free_i64(r_temp);
742 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
743
744 tcg_gen_trunc_i64_tl(dst, r_temp2);
745
746 tcg_temp_free_i64(r_temp2);
747
748 tcg_temp_free_i32(r_src1);
749 tcg_temp_free_i32(r_src2);
750 }
751
752 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
753 {
754 /* zero-extend truncated operands before multiplication */
755 gen_op_multiply(dst, src1, src2, 0);
756 }
757
758 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
759 {
760 /* sign-extend truncated operands before multiplication */
761 gen_op_multiply(dst, src1, src2, 1);
762 }
763
764 #ifdef TARGET_SPARC64
765 static inline void gen_trap_ifdivzero_tl(TCGv divisor)
766 {
767 TCGv_i32 r_const;
768 int l1;
769
770 l1 = gen_new_label();
771 tcg_gen_brcondi_tl(TCG_COND_NE, divisor, 0, l1);
772 r_const = tcg_const_i32(TT_DIV_ZERO);
773 gen_helper_raise_exception(cpu_env, r_const);
774 tcg_temp_free_i32(r_const);
775 gen_set_label(l1);
776 }
777
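/* Informative note: besides the divide-by-zero trap, the one special
   case is INT64_MIN / -1, whose true quotient (2^63) is not
   representable; the branches below pin the result to INT64_MIN,
   which also avoids the SIGFPE a native division would raise for
   this operand pair on common hosts. */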
778 static inline void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
779 {
780 int l1, l2;
781 TCGv r_temp1, r_temp2;
782
783 l1 = gen_new_label();
784 l2 = gen_new_label();
785 r_temp1 = tcg_temp_local_new();
786 r_temp2 = tcg_temp_local_new();
787 tcg_gen_mov_tl(r_temp1, src1);
788 tcg_gen_mov_tl(r_temp2, src2);
789 gen_trap_ifdivzero_tl(r_temp2);
790 tcg_gen_brcondi_tl(TCG_COND_NE, r_temp1, INT64_MIN, l1);
791 tcg_gen_brcondi_tl(TCG_COND_NE, r_temp2, -1, l1);
792 tcg_gen_movi_i64(dst, INT64_MIN);
793 tcg_gen_br(l2);
794 gen_set_label(l1);
795 tcg_gen_div_i64(dst, r_temp1, r_temp2);
796 gen_set_label(l2);
797 tcg_temp_free(r_temp1);
798 tcg_temp_free(r_temp2);
799 }
800 #endif
801
802 // 1
803 static inline void gen_op_eval_ba(TCGv dst)
804 {
805 tcg_gen_movi_tl(dst, 1);
806 }
807
808 // Z
809 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
810 {
811 gen_mov_reg_Z(dst, src);
812 }
813
814 // Z | (N ^ V)
815 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
816 {
817 gen_mov_reg_N(cpu_tmp0, src);
818 gen_mov_reg_V(dst, src);
819 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
820 gen_mov_reg_Z(cpu_tmp0, src);
821 tcg_gen_or_tl(dst, dst, cpu_tmp0);
822 }
823
824 // N ^ V
825 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
826 {
827 gen_mov_reg_V(cpu_tmp0, src);
828 gen_mov_reg_N(dst, src);
829 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
830 }
831
832 // C | Z
833 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
834 {
835 gen_mov_reg_Z(cpu_tmp0, src);
836 gen_mov_reg_C(dst, src);
837 tcg_gen_or_tl(dst, dst, cpu_tmp0);
838 }
839
840 // C
841 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
842 {
843 gen_mov_reg_C(dst, src);
844 }
845
846 // V
847 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
848 {
849 gen_mov_reg_V(dst, src);
850 }
851
852 // 0
853 static inline void gen_op_eval_bn(TCGv dst)
854 {
855 tcg_gen_movi_tl(dst, 0);
856 }
857
858 // N
859 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
860 {
861 gen_mov_reg_N(dst, src);
862 }
863
864 // !Z
865 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
866 {
867 gen_mov_reg_Z(dst, src);
868 tcg_gen_xori_tl(dst, dst, 0x1);
869 }
870
871 // !(Z | (N ^ V))
872 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
873 {
874 gen_mov_reg_N(cpu_tmp0, src);
875 gen_mov_reg_V(dst, src);
876 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
877 gen_mov_reg_Z(cpu_tmp0, src);
878 tcg_gen_or_tl(dst, dst, cpu_tmp0);
879 tcg_gen_xori_tl(dst, dst, 0x1);
880 }
881
882 // !(N ^ V)
883 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
884 {
885 gen_mov_reg_V(cpu_tmp0, src);
886 gen_mov_reg_N(dst, src);
887 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
888 tcg_gen_xori_tl(dst, dst, 0x1);
889 }
890
891 // !(C | Z)
892 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
893 {
894 gen_mov_reg_Z(cpu_tmp0, src);
895 gen_mov_reg_C(dst, src);
896 tcg_gen_or_tl(dst, dst, cpu_tmp0);
897 tcg_gen_xori_tl(dst, dst, 0x1);
898 }
899
900 // !C
901 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
902 {
903 gen_mov_reg_C(dst, src);
904 tcg_gen_xori_tl(dst, dst, 0x1);
905 }
906
907 // !N
908 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
909 {
910 gen_mov_reg_N(dst, src);
911 tcg_gen_xori_tl(dst, dst, 0x1);
912 }
913
914 // !V
915 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
916 {
917 gen_mov_reg_V(dst, src);
918 tcg_gen_xori_tl(dst, dst, 0x1);
919 }
920
921 /*
922 FSR bit field FCC1 | FCC0:
923 0 =
924 1 <
925 2 >
926 3 unordered
927 */
928 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
929 unsigned int fcc_offset)
930 {
931 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
932 tcg_gen_andi_tl(reg, reg, 0x1);
933 }
934
935 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
936 unsigned int fcc_offset)
937 {
938 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
939 tcg_gen_andi_tl(reg, reg, 0x1);
940 }
941
942 // !0: FCC0 | FCC1
943 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
944 unsigned int fcc_offset)
945 {
946 gen_mov_reg_FCC0(dst, src, fcc_offset);
947 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
948 tcg_gen_or_tl(dst, dst, cpu_tmp0);
949 }
950
951 // 1 or 2: FCC0 ^ FCC1
952 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
953 unsigned int fcc_offset)
954 {
955 gen_mov_reg_FCC0(dst, src, fcc_offset);
956 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
957 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
958 }
959
960 // 1 or 3: FCC0
961 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
962 unsigned int fcc_offset)
963 {
964 gen_mov_reg_FCC0(dst, src, fcc_offset);
965 }
966
967 // 1: FCC0 & !FCC1
968 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
969 unsigned int fcc_offset)
970 {
971 gen_mov_reg_FCC0(dst, src, fcc_offset);
972 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
973 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
974 tcg_gen_and_tl(dst, dst, cpu_tmp0);
975 }
976
977 // 2 or 3: FCC1
978 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
979 unsigned int fcc_offset)
980 {
981 gen_mov_reg_FCC1(dst, src, fcc_offset);
982 }
983
984 // 2: !FCC0 & FCC1
985 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
986 unsigned int fcc_offset)
987 {
988 gen_mov_reg_FCC0(dst, src, fcc_offset);
989 tcg_gen_xori_tl(dst, dst, 0x1);
990 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
991 tcg_gen_and_tl(dst, dst, cpu_tmp0);
992 }
993
994 // 3: FCC0 & FCC1
995 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
996 unsigned int fcc_offset)
997 {
998 gen_mov_reg_FCC0(dst, src, fcc_offset);
999 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1000 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1001 }
1002
1003 // 0: !(FCC0 | FCC1)
1004 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
1005 unsigned int fcc_offset)
1006 {
1007 gen_mov_reg_FCC0(dst, src, fcc_offset);
1008 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1009 tcg_gen_or_tl(dst, dst, cpu_tmp0);
1010 tcg_gen_xori_tl(dst, dst, 0x1);
1011 }
1012
1013 // 0 or 3: !(FCC0 ^ FCC1)
1014 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
1015 unsigned int fcc_offset)
1016 {
1017 gen_mov_reg_FCC0(dst, src, fcc_offset);
1018 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1019 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
1020 tcg_gen_xori_tl(dst, dst, 0x1);
1021 }
1022
1023 // 0 or 2: !FCC0
1024 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
1025 unsigned int fcc_offset)
1026 {
1027 gen_mov_reg_FCC0(dst, src, fcc_offset);
1028 tcg_gen_xori_tl(dst, dst, 0x1);
1029 }
1030
1031 // !1: !(FCC0 & !FCC1)
1032 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
1033 unsigned int fcc_offset)
1034 {
1035 gen_mov_reg_FCC0(dst, src, fcc_offset);
1036 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1037 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
1038 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1039 tcg_gen_xori_tl(dst, dst, 0x1);
1040 }
1041
1042 // 0 or 1: !FCC1
1043 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
1044 unsigned int fcc_offset)
1045 {
1046 gen_mov_reg_FCC1(dst, src, fcc_offset);
1047 tcg_gen_xori_tl(dst, dst, 0x1);
1048 }
1049
1050 // !2: !(!FCC0 & FCC1)
1051 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
1052 unsigned int fcc_offset)
1053 {
1054 gen_mov_reg_FCC0(dst, src, fcc_offset);
1055 tcg_gen_xori_tl(dst, dst, 0x1);
1056 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1057 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1058 tcg_gen_xori_tl(dst, dst, 0x1);
1059 }
1060
1061 // !3: !(FCC0 & FCC1)
1062 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
1063 unsigned int fcc_offset)
1064 {
1065 gen_mov_reg_FCC0(dst, src, fcc_offset);
1066 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1067 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1068 tcg_gen_xori_tl(dst, dst, 0x1);
1069 }
1070
1071 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
1072 target_ulong pc2, TCGv r_cond)
1073 {
1074 int l1;
1075
1076 l1 = gen_new_label();
1077
1078 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
1079
1080 gen_goto_tb(dc, 0, pc1, pc1 + 4);
1081
1082 gen_set_label(l1);
1083 gen_goto_tb(dc, 1, pc2, pc2 + 4);
1084 }
1085
1086 static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
1087 target_ulong pc2, TCGv r_cond)
1088 {
1089 int l1;
1090
1091 l1 = gen_new_label();
1092
1093 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
1094
1095 gen_goto_tb(dc, 0, pc2, pc1);
1096
1097 gen_set_label(l1);
1098 gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
1099 }
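
/* Informative note on gen_branch_a() above: it implements a
   conditional branch with the annul bit set. When the condition
   holds, control falls through to the delay slot at pc2 with
   npc = pc1 (the target); when it fails, the delay slot is annulled
   and execution resumes at pc2 + 4. */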
1100
1101 static inline void gen_generic_branch(target_ulong npc1, target_ulong npc2,
1102 TCGv r_cond)
1103 {
1104 int l1, l2;
1105
1106 l1 = gen_new_label();
1107 l2 = gen_new_label();
1108
1109 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
1110
1111 tcg_gen_movi_tl(cpu_npc, npc1);
1112 tcg_gen_br(l2);
1113
1114 gen_set_label(l1);
1115 tcg_gen_movi_tl(cpu_npc, npc2);
1116 gen_set_label(l2);
1117 }
1118
1119 /* call this function before using the condition register as it may
1120 have been set for a jump */
1121 static inline void flush_cond(DisasContext *dc, TCGv cond)
1122 {
1123 if (dc->npc == JUMP_PC) {
1124 gen_generic_branch(dc->jump_pc[0], dc->jump_pc[1], cond);
1125 dc->npc = DYNAMIC_PC;
1126 }
1127 }
1128
1129 static inline void save_npc(DisasContext *dc, TCGv cond)
1130 {
1131 if (dc->npc == JUMP_PC) {
1132 gen_generic_branch(dc->jump_pc[0], dc->jump_pc[1], cond);
1133 dc->npc = DYNAMIC_PC;
1134 } else if (dc->npc != DYNAMIC_PC) {
1135 tcg_gen_movi_tl(cpu_npc, dc->npc);
1136 }
1137 }
1138
1139 static inline void save_state(DisasContext *dc, TCGv cond)
1140 {
1141 tcg_gen_movi_tl(cpu_pc, dc->pc);
1142 /* flush pending conditional evaluations before exposing cpu state */
1143 if (dc->cc_op != CC_OP_FLAGS) {
1144 dc->cc_op = CC_OP_FLAGS;
1145 gen_helper_compute_psr(cpu_env);
1146 }
1147 save_npc(dc, cond);
1148 }
1149
1150 static inline void gen_mov_pc_npc(DisasContext *dc, TCGv cond)
1151 {
1152 if (dc->npc == JUMP_PC) {
1153 gen_generic_branch(dc->jump_pc[0], dc->jump_pc[1], cond);
1154 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1155 dc->pc = DYNAMIC_PC;
1156 } else if (dc->npc == DYNAMIC_PC) {
1157 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1158 dc->pc = DYNAMIC_PC;
1159 } else {
1160 dc->pc = dc->npc;
1161 }
1162 }
1163
1164 static inline void gen_op_next_insn(void)
1165 {
1166 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1167 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1168 }
1169
1170 static inline void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1171 DisasContext *dc)
1172 {
1173 TCGv_i32 r_src;
1174
1175 #ifdef TARGET_SPARC64
1176 if (cc)
1177 r_src = cpu_xcc;
1178 else
1179 r_src = cpu_psr;
1180 #else
1181 r_src = cpu_psr;
1182 #endif
1183 switch (dc->cc_op) {
1184 case CC_OP_FLAGS:
1185 break;
1186 default:
1187 gen_helper_compute_psr(cpu_env);
1188 dc->cc_op = CC_OP_FLAGS;
1189 break;
1190 }
1191 switch (cond) {
1192 case 0x0:
1193 gen_op_eval_bn(r_dst);
1194 break;
1195 case 0x1:
1196 gen_op_eval_be(r_dst, r_src);
1197 break;
1198 case 0x2:
1199 gen_op_eval_ble(r_dst, r_src);
1200 break;
1201 case 0x3:
1202 gen_op_eval_bl(r_dst, r_src);
1203 break;
1204 case 0x4:
1205 gen_op_eval_bleu(r_dst, r_src);
1206 break;
1207 case 0x5:
1208 gen_op_eval_bcs(r_dst, r_src);
1209 break;
1210 case 0x6:
1211 gen_op_eval_bneg(r_dst, r_src);
1212 break;
1213 case 0x7:
1214 gen_op_eval_bvs(r_dst, r_src);
1215 break;
1216 case 0x8:
1217 gen_op_eval_ba(r_dst);
1218 break;
1219 case 0x9:
1220 gen_op_eval_bne(r_dst, r_src);
1221 break;
1222 case 0xa:
1223 gen_op_eval_bg(r_dst, r_src);
1224 break;
1225 case 0xb:
1226 gen_op_eval_bge(r_dst, r_src);
1227 break;
1228 case 0xc:
1229 gen_op_eval_bgu(r_dst, r_src);
1230 break;
1231 case 0xd:
1232 gen_op_eval_bcc(r_dst, r_src);
1233 break;
1234 case 0xe:
1235 gen_op_eval_bpos(r_dst, r_src);
1236 break;
1237 case 0xf:
1238 gen_op_eval_bvc(r_dst, r_src);
1239 break;
1240 }
1241 }
1242
1243 static inline void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1244 {
1245 unsigned int offset;
1246
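    /* fcc0 sits at FSR bits 11:10; fcc1..fcc3 (SPARC64) at bits 33:32,
       35:34 and 37:36, hence the offsets below relative to
       FSR_FCC0_SHIFT: 22, 24 and 26. */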
1247 switch (cc) {
1248 default:
1249 case 0x0:
1250 offset = 0;
1251 break;
1252 case 0x1:
1253 offset = 32 - 10;
1254 break;
1255 case 0x2:
1256 offset = 34 - 10;
1257 break;
1258 case 0x3:
1259 offset = 36 - 10;
1260 break;
1261 }
1262
1263 switch (cond) {
1264 case 0x0:
1265 gen_op_eval_bn(r_dst);
1266 break;
1267 case 0x1:
1268 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1269 break;
1270 case 0x2:
1271 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1272 break;
1273 case 0x3:
1274 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1275 break;
1276 case 0x4:
1277 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1278 break;
1279 case 0x5:
1280 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1281 break;
1282 case 0x6:
1283 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1284 break;
1285 case 0x7:
1286 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1287 break;
1288 case 0x8:
1289 gen_op_eval_ba(r_dst);
1290 break;
1291 case 0x9:
1292 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1293 break;
1294 case 0xa:
1295 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1296 break;
1297 case 0xb:
1298 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1299 break;
1300 case 0xc:
1301 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1302 break;
1303 case 0xd:
1304 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1305 break;
1306 case 0xe:
1307 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1308 break;
1309 case 0xf:
1310 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1311 break;
1312 }
1313 }
1314
1315 #ifdef TARGET_SPARC64
1316 // Inverted logic: each entry holds the negation of the BPr condition, so gen_cond_reg() below leaves the result at 0 when the inverted test succeeds
1317 static const int gen_tcg_cond_reg[8] = {
1318 -1,
1319 TCG_COND_NE,
1320 TCG_COND_GT,
1321 TCG_COND_GE,
1322 -1,
1323 TCG_COND_EQ,
1324 TCG_COND_LE,
1325 TCG_COND_LT,
1326 };
1327
1328 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1329 {
1330 int l1;
1331
1332 l1 = gen_new_label();
1333 tcg_gen_movi_tl(r_dst, 0);
1334 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], r_src, 0, l1);
1335 tcg_gen_movi_tl(r_dst, 1);
1336 gen_set_label(l1);
1337 }
1338 #endif
1339
1340 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc,
1341 TCGv r_cond)
1342 {
1343 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1344 target_ulong target = dc->pc + offset;
1345
1346 if (cond == 0x0) {
1347 /* unconditional not taken */
1348 if (a) {
1349 dc->pc = dc->npc + 4;
1350 dc->npc = dc->pc + 4;
1351 } else {
1352 dc->pc = dc->npc;
1353 dc->npc = dc->pc + 4;
1354 }
1355 } else if (cond == 0x8) {
1356 /* unconditional taken */
1357 if (a) {
1358 dc->pc = target;
1359 dc->npc = dc->pc + 4;
1360 } else {
1361 dc->pc = dc->npc;
1362 dc->npc = target;
1363 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1364 }
1365 } else {
1366 flush_cond(dc, r_cond);
1367 gen_cond(r_cond, cc, cond, dc);
1368 if (a) {
1369 gen_branch_a(dc, target, dc->npc, r_cond);
1370 dc->is_br = 1;
1371 } else {
1372 dc->pc = dc->npc;
1373 dc->jump_pc[0] = target;
1374 if (unlikely(dc->npc == DYNAMIC_PC)) {
1375 dc->jump_pc[1] = DYNAMIC_PC;
1376 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1377 } else {
1378 dc->jump_pc[1] = dc->npc + 4;
1379 dc->npc = JUMP_PC;
1380 }
1381 }
1382 }
1383 }
1384
1385 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc,
1386 TCGv r_cond)
1387 {
1388 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1389 target_ulong target = dc->pc + offset;
1390
1391 if (cond == 0x0) {
1392 /* unconditional not taken */
1393 if (a) {
1394 dc->pc = dc->npc + 4;
1395 dc->npc = dc->pc + 4;
1396 } else {
1397 dc->pc = dc->npc;
1398 dc->npc = dc->pc + 4;
1399 }
1400 } else if (cond == 0x8) {
1401 /* unconditional taken */
1402 if (a) {
1403 dc->pc = target;
1404 dc->npc = dc->pc + 4;
1405 } else {
1406 dc->pc = dc->npc;
1407 dc->npc = target;
1408 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1409 }
1410 } else {
1411 flush_cond(dc, r_cond);
1412 gen_fcond(r_cond, cc, cond);
1413 if (a) {
1414 gen_branch_a(dc, target, dc->npc, r_cond);
1415 dc->is_br = 1;
1416 } else {
1417 dc->pc = dc->npc;
1418 dc->jump_pc[0] = target;
1419 if (unlikely(dc->npc == DYNAMIC_PC)) {
1420 dc->jump_pc[1] = DYNAMIC_PC;
1421 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1422 } else {
1423 dc->jump_pc[1] = dc->npc + 4;
1424 dc->npc = JUMP_PC;
1425 }
1426 }
1427 }
1428 }
1429
1430 #ifdef TARGET_SPARC64
1431 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1432 TCGv r_cond, TCGv r_reg)
1433 {
1434 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1435 target_ulong target = dc->pc + offset;
1436
1437 flush_cond(dc, r_cond);
1438 gen_cond_reg(r_cond, cond, r_reg);
1439 if (a) {
1440 gen_branch_a(dc, target, dc->npc, r_cond);
1441 dc->is_br = 1;
1442 } else {
1443 dc->pc = dc->npc;
1444 dc->jump_pc[0] = target;
1445 if (unlikely(dc->npc == DYNAMIC_PC)) {
1446 dc->jump_pc[1] = DYNAMIC_PC;
1447 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1448 } else {
1449 dc->jump_pc[1] = dc->npc + 4;
1450 dc->npc = JUMP_PC;
1451 }
1452 }
1453 }
1454
1455 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1456 {
1457 switch (fccno) {
1458 case 0:
1459 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1460 break;
1461 case 1:
1462 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1463 break;
1464 case 2:
1465 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1466 break;
1467 case 3:
1468 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1469 break;
1470 }
1471 }
1472
1473 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1474 {
1475 switch (fccno) {
1476 case 0:
1477 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1478 break;
1479 case 1:
1480 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1481 break;
1482 case 2:
1483 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1484 break;
1485 case 3:
1486 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1487 break;
1488 }
1489 }
1490
1491 static inline void gen_op_fcmpq(int fccno)
1492 {
1493 switch (fccno) {
1494 case 0:
1495 gen_helper_fcmpq(cpu_env);
1496 break;
1497 case 1:
1498 gen_helper_fcmpq_fcc1(cpu_env);
1499 break;
1500 case 2:
1501 gen_helper_fcmpq_fcc2(cpu_env);
1502 break;
1503 case 3:
1504 gen_helper_fcmpq_fcc3(cpu_env);
1505 break;
1506 }
1507 }
1508
1509 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1510 {
1511 switch (fccno) {
1512 case 0:
1513 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1514 break;
1515 case 1:
1516 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1517 break;
1518 case 2:
1519 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1520 break;
1521 case 3:
1522 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1523 break;
1524 }
1525 }
1526
1527 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1528 {
1529 switch (fccno) {
1530 case 0:
1531 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1532 break;
1533 case 1:
1534 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1535 break;
1536 case 2:
1537 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1538 break;
1539 case 3:
1540 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1541 break;
1542 }
1543 }
1544
1545 static inline void gen_op_fcmpeq(int fccno)
1546 {
1547 switch (fccno) {
1548 case 0:
1549 gen_helper_fcmpeq(cpu_env);
1550 break;
1551 case 1:
1552 gen_helper_fcmpeq_fcc1(cpu_env);
1553 break;
1554 case 2:
1555 gen_helper_fcmpeq_fcc2(cpu_env);
1556 break;
1557 case 3:
1558 gen_helper_fcmpeq_fcc3(cpu_env);
1559 break;
1560 }
1561 }
1562
1563 #else
1564
1565 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1566 {
1567 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1568 }
1569
1570 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1571 {
1572 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1573 }
1574
1575 static inline void gen_op_fcmpq(int fccno)
1576 {
1577 gen_helper_fcmpq(cpu_env);
1578 }
1579
1580 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1581 {
1582 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1583 }
1584
1585 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1586 {
1587 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1588 }
1589
1590 static inline void gen_op_fcmpeq(int fccno)
1591 {
1592 gen_helper_fcmpeq(cpu_env);
1593 }
1594 #endif
1595
1596 static inline void gen_op_fpexception_im(int fsr_flags)
1597 {
1598 TCGv_i32 r_const;
1599
1600 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1601 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1602 r_const = tcg_const_i32(TT_FP_EXCP);
1603 gen_helper_raise_exception(cpu_env, r_const);
1604 tcg_temp_free_i32(r_const);
1605 }
1606
1607 static int gen_trap_ifnofpu(DisasContext *dc, TCGv r_cond)
1608 {
1609 #if !defined(CONFIG_USER_ONLY)
1610 if (!dc->fpu_enabled) {
1611 TCGv_i32 r_const;
1612
1613 save_state(dc, r_cond);
1614 r_const = tcg_const_i32(TT_NFPU_INSN);
1615 gen_helper_raise_exception(cpu_env, r_const);
1616 tcg_temp_free_i32(r_const);
1617 dc->is_br = 1;
1618 return 1;
1619 }
1620 #endif
1621 return 0;
1622 }
1623
1624 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1625 {
1626 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1627 }
1628
1629 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1630 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1631 {
1632 TCGv_i32 dst, src;
1633
1634 src = gen_load_fpr_F(dc, rs);
1635 dst = gen_dest_fpr_F();
1636
1637 gen(dst, cpu_env, src);
1638
1639 gen_store_fpr_F(dc, rd, dst);
1640 }
1641
1642 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1643 void (*gen)(TCGv_i32, TCGv_i32))
1644 {
1645 TCGv_i32 dst, src;
1646
1647 src = gen_load_fpr_F(dc, rs);
1648 dst = gen_dest_fpr_F();
1649
1650 gen(dst, src);
1651
1652 gen_store_fpr_F(dc, rd, dst);
1653 }
1654
1655 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1656 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1657 {
1658 TCGv_i32 dst, src1, src2;
1659
1660 src1 = gen_load_fpr_F(dc, rs1);
1661 src2 = gen_load_fpr_F(dc, rs2);
1662 dst = gen_dest_fpr_F();
1663
1664 gen(dst, cpu_env, src1, src2);
1665
1666 gen_store_fpr_F(dc, rd, dst);
1667 }
1668
1669 #ifdef TARGET_SPARC64
1670 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1671 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1672 {
1673 TCGv_i32 dst, src1, src2;
1674
1675 src1 = gen_load_fpr_F(dc, rs1);
1676 src2 = gen_load_fpr_F(dc, rs2);
1677 dst = gen_dest_fpr_F();
1678
1679 gen(dst, src1, src2);
1680
1681 gen_store_fpr_F(dc, rd, dst);
1682 }
1683 #endif
1684
1685 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1686 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1687 {
1688 TCGv_i64 dst, src;
1689
1690 src = gen_load_fpr_D(dc, rs);
1691 dst = gen_dest_fpr_D();
1692
1693 gen(dst, cpu_env, src);
1694
1695 gen_store_fpr_D(dc, rd, dst);
1696 }
1697
1698 #ifdef TARGET_SPARC64
1699 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1700 void (*gen)(TCGv_i64, TCGv_i64))
1701 {
1702 TCGv_i64 dst, src;
1703
1704 src = gen_load_fpr_D(dc, rs);
1705 dst = gen_dest_fpr_D();
1706
1707 gen(dst, src);
1708
1709 gen_store_fpr_D(dc, rd, dst);
1710 }
1711 #endif
1712
1713 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1714 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1715 {
1716 TCGv_i64 dst, src1, src2;
1717
1718 src1 = gen_load_fpr_D(dc, rs1);
1719 src2 = gen_load_fpr_D(dc, rs2);
1720 dst = gen_dest_fpr_D();
1721
1722 gen(dst, cpu_env, src1, src2);
1723
1724 gen_store_fpr_D(dc, rd, dst);
1725 }
1726
1727 #ifdef TARGET_SPARC64
1728 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1729 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1730 {
1731 TCGv_i64 dst, src1, src2;
1732
1733 src1 = gen_load_fpr_D(dc, rs1);
1734 src2 = gen_load_fpr_D(dc, rs2);
1735 dst = gen_dest_fpr_D();
1736
1737 gen(dst, src1, src2);
1738
1739 gen_store_fpr_D(dc, rd, dst);
1740 }
1741
1742 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1743 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1744 {
1745 TCGv_i64 dst, src1, src2;
1746
1747 src1 = gen_load_fpr_D(dc, rs1);
1748 src2 = gen_load_fpr_D(dc, rs2);
1749 dst = gen_dest_fpr_D();
1750
1751 gen(dst, cpu_gsr, src1, src2);
1752
1753 gen_store_fpr_D(dc, rd, dst);
1754 }
1755
1756 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1757 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1758 {
1759 TCGv_i64 dst, src0, src1, src2;
1760
1761 src1 = gen_load_fpr_D(dc, rs1);
1762 src2 = gen_load_fpr_D(dc, rs2);
1763 src0 = gen_load_fpr_D(dc, rd);
1764 dst = gen_dest_fpr_D();
1765
1766 gen(dst, src0, src1, src2);
1767
1768 gen_store_fpr_D(dc, rd, dst);
1769 }
1770 #endif
1771
1772 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1773 void (*gen)(TCGv_ptr))
1774 {
1775 gen_op_load_fpr_QT1(QFPREG(rs));
1776
1777 gen(cpu_env);
1778
1779 gen_op_store_QT0_fpr(QFPREG(rd));
1780 gen_update_fprs_dirty(QFPREG(rd));
1781 }
1782
1783 #ifdef TARGET_SPARC64
1784 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1785 void (*gen)(TCGv_ptr))
1786 {
1787 gen_op_load_fpr_QT1(QFPREG(rs));
1788
1789 gen(cpu_env);
1790
1791 gen_op_store_QT0_fpr(QFPREG(rd));
1792 gen_update_fprs_dirty(QFPREG(rd));
1793 }
1794 #endif
1795
1796 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1797 void (*gen)(TCGv_ptr))
1798 {
1799 gen_op_load_fpr_QT0(QFPREG(rs1));
1800 gen_op_load_fpr_QT1(QFPREG(rs2));
1801
1802 gen(cpu_env);
1803
1804 gen_op_store_QT0_fpr(QFPREG(rd));
1805 gen_update_fprs_dirty(QFPREG(rd));
1806 }
1807
1808 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1809 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1810 {
1811 TCGv_i64 dst;
1812 TCGv_i32 src1, src2;
1813
1814 src1 = gen_load_fpr_F(dc, rs1);
1815 src2 = gen_load_fpr_F(dc, rs2);
1816 dst = gen_dest_fpr_D();
1817
1818 gen(dst, cpu_env, src1, src2);
1819
1820 gen_store_fpr_D(dc, rd, dst);
1821 }
1822
1823 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1824 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1825 {
1826 TCGv_i64 src1, src2;
1827
1828 src1 = gen_load_fpr_D(dc, rs1);
1829 src2 = gen_load_fpr_D(dc, rs2);
1830
1831 gen(cpu_env, src1, src2);
1832
1833 gen_op_store_QT0_fpr(QFPREG(rd));
1834 gen_update_fprs_dirty(QFPREG(rd));
1835 }
1836
1837 #ifdef TARGET_SPARC64
1838 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1839 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1840 {
1841 TCGv_i64 dst;
1842 TCGv_i32 src;
1843
1844 src = gen_load_fpr_F(dc, rs);
1845 dst = gen_dest_fpr_D();
1846
1847 gen(dst, cpu_env, src);
1848
1849 gen_store_fpr_D(dc, rd, dst);
1850 }
1851 #endif
1852
1853 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1854 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1855 {
1856 TCGv_i64 dst;
1857 TCGv_i32 src;
1858
1859 src = gen_load_fpr_F(dc, rs);
1860 dst = gen_dest_fpr_D();
1861
1862 gen(dst, cpu_env, src);
1863
1864 gen_store_fpr_D(dc, rd, dst);
1865 }
1866
1867 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1868 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1869 {
1870 TCGv_i32 dst;
1871 TCGv_i64 src;
1872
1873 src = gen_load_fpr_D(dc, rs);
1874 dst = gen_dest_fpr_F();
1875
1876 gen(dst, cpu_env, src);
1877
1878 gen_store_fpr_F(dc, rd, dst);
1879 }
1880
1881 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1882 void (*gen)(TCGv_i32, TCGv_ptr))
1883 {
1884 TCGv_i32 dst;
1885
1886 gen_op_load_fpr_QT1(QFPREG(rs));
1887 dst = gen_dest_fpr_F();
1888
1889 gen(dst, cpu_env);
1890
1891 gen_store_fpr_F(dc, rd, dst);
1892 }
1893
1894 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1895 void (*gen)(TCGv_i64, TCGv_ptr))
1896 {
1897 TCGv_i64 dst;
1898
1899 gen_op_load_fpr_QT1(QFPREG(rs));
1900 dst = gen_dest_fpr_D();
1901
1902 gen(dst, cpu_env);
1903
1904 gen_store_fpr_D(dc, rd, dst);
1905 }
1906
1907 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1908 void (*gen)(TCGv_ptr, TCGv_i32))
1909 {
1910 TCGv_i32 src;
1911
1912 src = gen_load_fpr_F(dc, rs);
1913
1914 gen(cpu_env, src);
1915
1916 gen_op_store_QT0_fpr(QFPREG(rd));
1917 gen_update_fprs_dirty(QFPREG(rd));
1918 }
1919
1920 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1921 void (*gen)(TCGv_ptr, TCGv_i64))
1922 {
1923 TCGv_i64 src;
1924
1925 src = gen_load_fpr_D(dc, rs);
1926
1927 gen(cpu_env, src);
1928
1929 gen_op_store_QT0_fpr(QFPREG(rd));
1930 gen_update_fprs_dirty(QFPREG(rd));
1931 }
1932
1933 /* asi moves */
1934 #ifdef TARGET_SPARC64
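/* Informative note: with the i bit set (IS_IMM) an alternate-space
   access takes its ASI from the %asi register at run time; otherwise
   the immediate asi field of the instruction is used. */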
1935 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
1936 {
1937 int asi;
1938 TCGv_i32 r_asi;
1939
1940 if (IS_IMM) {
1941 r_asi = tcg_temp_new_i32();
1942 tcg_gen_mov_i32(r_asi, cpu_asi);
1943 } else {
1944 asi = GET_FIELD(insn, 19, 26);
1945 r_asi = tcg_const_i32(asi);
1946 }
1947 return r_asi;
1948 }
1949
1950 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
1951 int sign)
1952 {
1953 TCGv_i32 r_asi, r_size, r_sign;
1954
1955 r_asi = gen_get_asi(insn, addr);
1956 r_size = tcg_const_i32(size);
1957 r_sign = tcg_const_i32(sign);
1958 gen_helper_ld_asi(dst, addr, r_asi, r_size, r_sign);
1959 tcg_temp_free_i32(r_sign);
1960 tcg_temp_free_i32(r_size);
1961 tcg_temp_free_i32(r_asi);
1962 }
1963
1964 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
1965 {
1966 TCGv_i32 r_asi, r_size;
1967
1968 r_asi = gen_get_asi(insn, addr);
1969 r_size = tcg_const_i32(size);
1970 gen_helper_st_asi(addr, src, r_asi, r_size);
1971 tcg_temp_free_i32(r_size);
1972 tcg_temp_free_i32(r_asi);
1973 }
1974
1975 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
1976 {
1977 TCGv_i32 r_asi, r_size, r_rd;
1978
1979 r_asi = gen_get_asi(insn, addr);
1980 r_size = tcg_const_i32(size);
1981 r_rd = tcg_const_i32(rd);
1982 gen_helper_ldf_asi(addr, r_asi, r_size, r_rd);
1983 tcg_temp_free_i32(r_rd);
1984 tcg_temp_free_i32(r_size);
1985 tcg_temp_free_i32(r_asi);
1986 }
1987
1988 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
1989 {
1990 TCGv_i32 r_asi, r_size, r_rd;
1991
1992 r_asi = gen_get_asi(insn, addr);
1993 r_size = tcg_const_i32(size);
1994 r_rd = tcg_const_i32(rd);
1995 gen_helper_stf_asi(addr, r_asi, r_size, r_rd);
1996 tcg_temp_free_i32(r_rd);
1997 tcg_temp_free_i32(r_size);
1998 tcg_temp_free_i32(r_asi);
1999 }
2000
2001 static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
2002 {
2003 TCGv_i32 r_asi, r_size, r_sign;
2004
2005 r_asi = gen_get_asi(insn, addr);
2006 r_size = tcg_const_i32(4);
2007 r_sign = tcg_const_i32(0);
2008 gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
2009 tcg_temp_free_i32(r_sign);
2010 gen_helper_st_asi(addr, dst, r_asi, r_size);
2011 tcg_temp_free_i32(r_size);
2012 tcg_temp_free_i32(r_asi);
2013 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2014 }
2015
2016 static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
2017 {
2018 TCGv_i32 r_asi, r_rd;
2019
2020 r_asi = gen_get_asi(insn, addr);
2021 r_rd = tcg_const_i32(rd);
2022 gen_helper_ldda_asi(addr, r_asi, r_rd);
2023 tcg_temp_free_i32(r_rd);
2024 tcg_temp_free_i32(r_asi);
2025 }
2026
2027 static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
2028 {
2029 TCGv_i32 r_asi, r_size;
2030
2031 gen_movl_reg_TN(rd + 1, cpu_tmp0);
2032 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
2033 r_asi = gen_get_asi(insn, addr);
2034 r_size = tcg_const_i32(8);
2035 gen_helper_st_asi(addr, cpu_tmp64, r_asi, r_size);
2036 tcg_temp_free_i32(r_size);
2037 tcg_temp_free_i32(r_asi);
2038 }
2039
2040 static inline void gen_cas_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
2041 int rd)
2042 {
2043 TCGv r_val1;
2044 TCGv_i32 r_asi;
2045
2046 r_val1 = tcg_temp_new();
2047 gen_movl_reg_TN(rd, r_val1);
2048 r_asi = gen_get_asi(insn, addr);
2049 gen_helper_cas_asi(dst, addr, r_val1, val2, r_asi);
2050 tcg_temp_free_i32(r_asi);
2051 tcg_temp_free(r_val1);
2052 }
2053
2054 static inline void gen_casx_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
2055 int rd)
2056 {
2057 TCGv_i32 r_asi;
2058
2059 gen_movl_reg_TN(rd, cpu_tmp64);
2060 r_asi = gen_get_asi(insn, addr);
2061 gen_helper_casx_asi(dst, addr, cpu_tmp64, val2, r_asi);
2062 tcg_temp_free_i32(r_asi);
2063 }
2064
2065 #elif !defined(CONFIG_USER_ONLY)
2066
2067 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2068 int sign)
2069 {
2070 TCGv_i32 r_asi, r_size, r_sign;
2071
2072 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2073 r_size = tcg_const_i32(size);
2074 r_sign = tcg_const_i32(sign);
2075 gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
2076 tcg_temp_free(r_sign);
2077 tcg_temp_free(r_size);
2078 tcg_temp_free(r_asi);
2079 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2080 }
2081
2082 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2083 {
2084 TCGv_i32 r_asi, r_size;
2085
2086 tcg_gen_extu_tl_i64(cpu_tmp64, src);
2087 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2088 r_size = tcg_const_i32(size);
2089 gen_helper_st_asi(addr, cpu_tmp64, r_asi, r_size);
2090 tcg_temp_free(r_size);
2091 tcg_temp_free(r_asi);
2092 }
2093
2094 static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
2095 {
2096 TCGv_i32 r_asi, r_size, r_sign;
2097 TCGv_i64 r_val;
2098
2099 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2100 r_size = tcg_const_i32(4);
2101 r_sign = tcg_const_i32(0);
2102 gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
2103 tcg_temp_free(r_sign);
2104 r_val = tcg_temp_new_i64();
2105 tcg_gen_extu_tl_i64(r_val, dst);
2106 gen_helper_st_asi(addr, r_val, r_asi, r_size);
2107 tcg_temp_free_i64(r_val);
2108 tcg_temp_free(r_size);
2109 tcg_temp_free(r_asi);
2110 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2111 }
2112
2113 static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
2114 {
2115 TCGv_i32 r_asi, r_size, r_sign;
2116
2117 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2118 r_size = tcg_const_i32(8);
2119 r_sign = tcg_const_i32(0);
2120 gen_helper_ld_asi(cpu_tmp64, addr, r_asi, r_size, r_sign);
2121 tcg_temp_free(r_sign);
2122 tcg_temp_free(r_size);
2123 tcg_temp_free(r_asi);
2124 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
2125 gen_movl_TN_reg(rd + 1, cpu_tmp0);
2126 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
2127 tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
2128 gen_movl_TN_reg(rd, hi);
2129 }
2130
2131 static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
2132 {
2133 TCGv_i32 r_asi, r_size;
2134
2135 gen_movl_reg_TN(rd + 1, cpu_tmp0);
2136 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
2137 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2138 r_size = tcg_const_i32(8);
2139 gen_helper_st_asi(addr, cpu_tmp64, r_asi, r_size);
2140 tcg_temp_free(r_size);
2141 tcg_temp_free(r_asi);
2142 }
2143 #endif
2144
2145 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2146 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2147 {
2148 TCGv_i64 r_val;
2149 TCGv_i32 r_asi, r_size;
2150
2151 gen_ld_asi(dst, addr, insn, 1, 0);
2152
2153 r_val = tcg_const_i64(0xffULL);
2154 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2155 r_size = tcg_const_i32(1);
2156 gen_helper_st_asi(addr, r_val, r_asi, r_size);
2157 tcg_temp_free_i32(r_size);
2158 tcg_temp_free_i32(r_asi);
2159 tcg_temp_free_i64(r_val);
2160 }
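/* LDSTUB(A) is a load followed by a store of the constant 0xff; the
   byte read before the store becomes the result in dst, so a lock byte
   reads 0 when free and 0xff once taken.  As with SWAPA above, the two
   helper calls need no extra locking under single-threaded TCG. */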
2161 #endif
2162
2163 static inline TCGv get_src1(unsigned int insn, TCGv def)
2164 {
2165 TCGv r_rs1 = def;
2166 unsigned int rs1;
2167
2168 rs1 = GET_FIELD(insn, 13, 17);
2169 if (rs1 == 0) {
2170 tcg_gen_movi_tl(def, 0);
2171 } else if (rs1 < 8) {
2172 r_rs1 = cpu_gregs[rs1];
2173 } else {
2174 tcg_gen_ld_tl(def, cpu_regwptr, (rs1 - 8) * sizeof(target_ulong));
2175 }
2176 return r_rs1;
2177 }
2178
2179 static inline TCGv get_src2(unsigned int insn, TCGv def)
2180 {
2181 TCGv r_rs2 = def;
2182
2183 if (IS_IMM) { /* immediate */
2184 target_long simm = GET_FIELDs(insn, 19, 31);
2185 tcg_gen_movi_tl(def, simm);
2186 } else { /* register */
2187 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2188 if (rs2 == 0) {
2189 tcg_gen_movi_tl(def, 0);
2190 } else if (rs2 < 8) {
2191 r_rs2 = cpu_gregs[rs2];
2192 } else {
2193 tcg_gen_ld_tl(def, cpu_regwptr, (rs2 - 8) * sizeof(target_ulong));
2194 }
2195 }
2196 return r_rs2;
2197 }
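/* Operand fetch follows the integer register file layout: %g0 always
   reads as zero, %g1..%g7 live in the TCG globals cpu_gregs[], and the
   windowed registers r8..r31 (%o/%l/%i) are loaded indirectly through
   cpu_regwptr.  For example, rs2 == 10 (%o2) loads from
   cpu_regwptr + 2 * sizeof(target_ulong). */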
2198
2199 #ifdef TARGET_SPARC64
2200 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2201 {
2202 TCGv_i32 r_tl = tcg_temp_new_i32();
2203
2204 /* load env->tl into r_tl */
2205 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2206
2207 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2208 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2209
2210 /* calculate offset to current trap state from env->ts, reuse r_tl */
2211 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2212 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUState, ts));
2213
2214 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2215 {
2216 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2217 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2218 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2219 tcg_temp_free_ptr(r_tl_tmp);
2220 }
2221
2222 tcg_temp_free_i32(r_tl);
2223 }
2224
2225 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2226 int width, bool cc, bool left)
2227 {
2228 TCGv lo1, lo2, t1, t2;
2229 uint64_t amask, tabl, tabr;
2230 int shift, imask, omask;
2231
2232 if (cc) {
2233 tcg_gen_mov_tl(cpu_cc_src, s1);
2234 tcg_gen_mov_tl(cpu_cc_src2, s2);
2235 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2236 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2237 dc->cc_op = CC_OP_SUB;
2238 }
2239
2240 /* Theory of operation: there are two tables, left and right (not to
2241 be confused with the left and right versions of the opcode). These
2242 are indexed by the low 3 bits of the inputs. To make things "easy",
2243 these tables are loaded into two constants, TABL and TABR below.
2244 The operation index = (input & imask) << shift calculates the index
2245 into the constant, while val = (table >> index) & omask calculates
2246 the value we're looking for. */
2247 switch (width) {
2248 case 8:
2249 imask = 0x7;
2250 shift = 3;
2251 omask = 0xff;
2252 if (left) {
2253 tabl = 0x80c0e0f0f8fcfeffULL;
2254 tabr = 0xff7f3f1f0f070301ULL;
2255 } else {
2256 tabl = 0x0103070f1f3f7fffULL;
2257 tabr = 0xfffefcf8f0e0c080ULL;
2258 }
2259 break;
2260 case 16:
2261 imask = 0x6;
2262 shift = 1;
2263 omask = 0xf;
2264 if (left) {
2265 tabl = 0x8cef;
2266 tabr = 0xf731;
2267 } else {
2268 tabl = 0x137f;
2269 tabr = 0xfec8;
2270 }
2271 break;
2272 case 32:
2273 imask = 0x4;
2274 shift = 0;
2275 omask = 0x3;
2276 if (left) {
2277 tabl = (2 << 2) | 3;
2278 tabr = (3 << 2) | 1;
2279 } else {
2280 tabl = (1 << 2) | 3;
2281 tabr = (3 << 2) | 2;
2282 }
2283 break;
2284 default:
2285 abort();
2286 }
2287
2288 lo1 = tcg_temp_new();
2289 lo2 = tcg_temp_new();
2290 tcg_gen_andi_tl(lo1, s1, imask);
2291 tcg_gen_andi_tl(lo2, s2, imask);
2292 tcg_gen_shli_tl(lo1, lo1, shift);
2293 tcg_gen_shli_tl(lo2, lo2, shift);
2294
2295 t1 = tcg_const_tl(tabl);
2296 t2 = tcg_const_tl(tabr);
2297 tcg_gen_shr_tl(lo1, t1, lo1);
2298 tcg_gen_shr_tl(lo2, t2, lo2);
2299 tcg_gen_andi_tl(dst, lo1, omask);
2300 tcg_gen_andi_tl(lo2, lo2, omask);
2301
2302 amask = -8;
2303 if (AM_CHECK(dc)) {
2304 amask &= 0xffffffffULL;
2305 }
2306 tcg_gen_andi_tl(s1, s1, amask);
2307 tcg_gen_andi_tl(s2, s2, amask);
2308
2309 /* We want to compute
2310 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2311 We've already done dst = lo1, so this reduces to
2312 dst &= (s1 == s2 ? -1 : lo2)
2313 Which we perform by
2314 lo2 |= -(s1 == s2)
2315 dst &= lo2
2316 */
2317 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2318 tcg_gen_neg_tl(t1, t1);
2319 tcg_gen_or_tl(lo2, lo2, t1);
2320 tcg_gen_and_tl(dst, dst, lo2);
2321
2322 tcg_temp_free(lo1);
2323 tcg_temp_free(lo2);
2324 tcg_temp_free(t1);
2325 tcg_temp_free(t2);
2326 }
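/* Worked example for the lookup above: for edge8 in the "left" opcode
   variant, s1 & 7 == 5 gives index = 5 << 3 = 40, and
   (0x80c0e0f0f8fcfeffULL >> 40) & 0xff == 0xe0, one of the shrinking
   masks 0xff, 0xfe, 0xfc, ..., 0x80 packed one per byte into TABL. */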
2327
2328 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2329 {
2330 TCGv tmp = tcg_temp_new();
2331
2332 tcg_gen_add_tl(tmp, s1, s2);
2333 tcg_gen_andi_tl(dst, tmp, -8);
2334 if (left) {
2335 tcg_gen_neg_tl(tmp, tmp);
2336 }
2337 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2338
2339 tcg_temp_free(tmp);
2340 }
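/* ALIGNADDRESS: dst receives the doubleword-aligned sum
   (s1 + s2) & ~7, while the discarded low three bits (negated for the
   "little" variant) are deposited into GSR.align for a later
   FALIGNDATA to consume. */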
2341 #endif
2342
2343 #define CHECK_IU_FEATURE(dc, FEATURE) \
2344 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2345 goto illegal_insn;
2346 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2347 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2348 goto nfpu_insn;
2349
2350 /* before an instruction, dc->pc must be static */
2351 static void disas_sparc_insn(DisasContext *dc)
2352 {
2353 unsigned int insn, opc, rs1, rs2, rd;
2354 TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
2355 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2356 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2357 target_long simm;
2358
2359 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
2360 tcg_gen_debug_insn_start(dc->pc);
2361 insn = ldl_code(dc->pc);
2362 opc = GET_FIELD(insn, 0, 1);
2363
2364 rd = GET_FIELD(insn, 2, 6);
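/* With GET_FIELD's big-endian bit numbering, opc is insn<31:30>
   ((insn >> 30) & 3) and rd is insn<29:25> ((insn >> 25) & 0x1f);
   rd is extracted unconditionally even for formats that reuse those
   bits, e.g. as the annul/condition field of a branch. */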
2365
2366 cpu_tmp1 = cpu_src1 = tcg_temp_new();
2367 cpu_tmp2 = cpu_src2 = tcg_temp_new();
2368
2369 switch (opc) {
2370 case 0: /* branches/sethi */
2371 {
2372 unsigned int xop = GET_FIELD(insn, 7, 9);
2373 int32_t target;
2374 switch (xop) {
2375 #ifdef TARGET_SPARC64
2376 case 0x1: /* V9 BPcc */
2377 {
2378 int cc;
2379
2380 target = GET_FIELD_SP(insn, 0, 18);
2381 target = sign_extend(target, 19);
2382 target <<= 2;
2383 cc = GET_FIELD_SP(insn, 20, 21);
2384 if (cc == 0)
2385 do_branch(dc, target, insn, 0, cpu_cond);
2386 else if (cc == 2)
2387 do_branch(dc, target, insn, 1, cpu_cond);
2388 else
2389 goto illegal_insn;
2390 goto jmp_insn;
2391 }
2392 case 0x3: /* V9 BPr */
2393 {
2394 target = GET_FIELD_SP(insn, 0, 13) |
2395 (GET_FIELD_SP(insn, 20, 21) << 14);
2396 target = sign_extend(target, 16);
2397 target <<= 2;
2398 cpu_src1 = get_src1(insn, cpu_src1);
2399 do_branch_reg(dc, target, insn, cpu_cond, cpu_src1);
2400 goto jmp_insn;
2401 }
2402 case 0x5: /* V9 FBPcc */
2403 {
2404 int cc = GET_FIELD_SP(insn, 20, 21);
2405 if (gen_trap_ifnofpu(dc, cpu_cond))
2406 goto jmp_insn;
2407 target = GET_FIELD_SP(insn, 0, 18);
2408 target = sign_extend(target, 19);
2409 target <<= 2;
2410 do_fbranch(dc, target, insn, cc, cpu_cond);
2411 goto jmp_insn;
2412 }
2413 #else
2414 case 0x7: /* CBN+x */
2415 {
2416 goto ncp_insn;
2417 }
2418 #endif
2419 case 0x2: /* BN+x */
2420 {
2421 target = GET_FIELD(insn, 10, 31);
2422 target = sign_extend(target, 22);
2423 target <<= 2;
2424 do_branch(dc, target, insn, 0, cpu_cond);
2425 goto jmp_insn;
2426 }
2427 case 0x6: /* FBN+x */
2428 {
2429 if (gen_trap_ifnofpu(dc, cpu_cond))
2430 goto jmp_insn;
2431 target = GET_FIELD(insn, 10, 31);
2432 target = sign_extend(target, 22);
2433 target <<= 2;
2434 do_fbranch(dc, target, insn, 0, cpu_cond);
2435 goto jmp_insn;
2436 }
2437 case 0x4: /* SETHI */
2438 if (rd) { /* rd == 0 makes SETHI a nop */
2439 uint32_t value = GET_FIELD(insn, 10, 31);
2440 TCGv r_const;
2441
2442 r_const = tcg_const_tl(value << 10);
2443 gen_movl_TN_reg(rd, r_const);
2444 tcg_temp_free(r_const);
2445 }
2446 break;
2447 case 0x0: /* UNIMPL */
2448 default:
2449 goto illegal_insn;
2450 }
2451 break;
2452 }
2453 break;
2454 case 1: /* CALL */
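/* CALL: the 30-bit word displacement is shifted to a byte offset, the
   address of the CALL itself is written to %o7 (r15), and only npc is
   redirected so that the delay slot still executes. */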
2455 {
2456 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2457 TCGv r_const;
2458
2459 r_const = tcg_const_tl(dc->pc);
2460 gen_movl_TN_reg(15, r_const);
2461 tcg_temp_free(r_const);
2462 target += dc->pc;
2463 gen_mov_pc_npc(dc, cpu_cond);
2464 dc->npc = target;
2465 }
2466 goto jmp_insn;
2467 case 2: /* FPU & Logical Operations */
2468 {
2469 unsigned int xop = GET_FIELD(insn, 7, 12);
2470 if (xop == 0x3a) { /* generate trap */
2471 int cond;
2472
2473 cpu_src1 = get_src1(insn, cpu_src1);
2474 if (IS_IMM) {
2475 rs2 = GET_FIELD(insn, 25, 31);
2476 tcg_gen_addi_tl(cpu_dst, cpu_src1, rs2);
2477 } else {
2478 rs2 = GET_FIELD(insn, 27, 31);
2479 if (rs2 != 0) {
2480 gen_movl_reg_TN(rs2, cpu_src2);
2481 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
2482 } else
2483 tcg_gen_mov_tl(cpu_dst, cpu_src1);
2484 }
2485
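/* Ticc: the trap operand is rs1 plus either a register or an
   immediate; below it is masked with V8_TRAP_MASK (or, for
   hypervisor-capable CPUs in supervisor mode, UA2005_HTRAP_MASK) and
   biased by TT_TRAP to form the exception number.  cond 0x8 is "trap
   always"; cond 0 ("trap never") skips straight to the next-insn
   epilogue below. */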
2486 cond = GET_FIELD(insn, 3, 6);
2487 if (cond == 0x8) { /* Trap Always */
2488 save_state(dc, cpu_cond);
2489 if ((dc->def->features & CPU_FEATURE_HYPV) &&
2490 supervisor(dc))
2491 tcg_gen_andi_tl(cpu_dst, cpu_dst, UA2005_HTRAP_MASK);
2492 else
2493 tcg_gen_andi_tl(cpu_dst, cpu_dst, V8_TRAP_MASK);
2494 tcg_gen_addi_tl(cpu_dst, cpu_dst, TT_TRAP);
2495 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
2496
2497 if (rs2 == 0 &&
2498 dc->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
2500 gen_helper_shutdown();
2502 } else {
2503 gen_helper_raise_exception(cpu_env, cpu_tmp32);
2504 }
2505 } else if (cond != 0) {
2506 TCGv r_cond = tcg_temp_new();
2507 int l1;
2508 #ifdef TARGET_SPARC64
2509 /* V9 icc/xcc */
2510 int cc = GET_FIELD_SP(insn, 11, 12);
2511
2512 save_state(dc, cpu_cond);
2513 if (cc == 0)
2514 gen_cond(r_cond, 0, cond, dc);
2515 else if (cc == 2)
2516 gen_cond(r_cond, 1, cond, dc);
2517 else
2518 goto illegal_insn;
2519 #else
2520 save_state(dc, cpu_cond);
2521 gen_cond(r_cond, 0, cond, dc);
2522 #endif
2523 l1 = gen_new_label();
2524 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
2525
2526 if ((dc->def->features & CPU_FEATURE_HYPV) &&
2527 supervisor(dc))
2528 tcg_gen_andi_tl(cpu_dst, cpu_dst, UA2005_HTRAP_MASK);
2529 else
2530 tcg_gen_andi_tl(cpu_dst, cpu_dst, V8_TRAP_MASK);
2531 tcg_gen_addi_tl(cpu_dst, cpu_dst, TT_TRAP);
2532 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
2533 gen_helper_raise_exception(cpu_env, cpu_tmp32);
2534
2535 gen_set_label(l1);
2536 tcg_temp_free(r_cond);
2537 }
2538 gen_op_next_insn();
2539 tcg_gen_exit_tb(0);
2540 dc->is_br = 1;
2541 goto jmp_insn;
2542 } else if (xop == 0x28) {
2543 rs1 = GET_FIELD(insn, 13, 17);
2544 switch (rs1) {
2545 case 0: /* rdy */
2546 #ifndef TARGET_SPARC64
2547 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2548 manual, rdy on the microSPARC
2549 II */
2550 case 0x0f: /* stbar in the SPARCv8 manual,
2551 rdy on the microSPARC II */
2552 case 0x10 ... 0x1f: /* implementation-dependent in the
2553 SPARCv8 manual, rdy on the
2554 microSPARC II */
2555 /* Read Asr17 */
2556 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2557 TCGv r_const;
2558
2559 /* Read Asr17 for a Leon3 monoprocessor */
2560 r_const = tcg_const_tl((1 << 8)
2561 | (dc->def->nwindows - 1));
2562 gen_movl_TN_reg(rd, r_const);
2563 tcg_temp_free(r_const);
2564 break;
2565 }
2566 #endif
2567 gen_movl_TN_reg(rd, cpu_y);
2568 break;
2569 #ifdef TARGET_SPARC64
2570 case 0x2: /* V9 rdccr */
2571 gen_helper_compute_psr(cpu_env);
2572 gen_helper_rdccr(cpu_dst, cpu_env);
2573 gen_movl_TN_reg(rd, cpu_dst);
2574 break;
2575 case 0x3: /* V9 rdasi */
2576 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2577 gen_movl_TN_reg(rd, cpu_dst);
2578 break;
2579 case 0x4: /* V9 rdtick */
2580 {
2581 TCGv_ptr r_tickptr;
2582
2583 r_tickptr = tcg_temp_new_ptr();
2584 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2585 offsetof(CPUState, tick));
2586 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2587 tcg_temp_free_ptr(r_tickptr);
2588 gen_movl_TN_reg(rd, cpu_dst);
2589 }
2590 break;
2591 case 0x5: /* V9 rdpc */
2592 {
2593 TCGv r_const;
2594
2595 r_const = tcg_const_tl(dc->pc);
2596 gen_movl_TN_reg(rd, r_const);
2597 tcg_temp_free(r_const);
2598 }
2599 break;
2600 case 0x6: /* V9 rdfprs */
2601 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2602 gen_movl_TN_reg(rd, cpu_dst);
2603 break;
2604 case 0xf: /* V9 membar */
2605 break; /* no effect: TCG performs guest memory ops in program order */
2606 case 0x13: /* Graphics Status */
2607 if (gen_trap_ifnofpu(dc, cpu_cond))
2608 goto jmp_insn;
2609 gen_movl_TN_reg(rd, cpu_gsr);
2610 break;
2611 case 0x16: /* Softint */
2612 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2613 gen_movl_TN_reg(rd, cpu_dst);
2614 break;
2615 case 0x17: /* Tick compare */
2616 gen_movl_TN_reg(rd, cpu_tick_cmpr);
2617 break;
2618 case 0x18: /* System tick */
2619 {
2620 TCGv_ptr r_tickptr;
2621
2622 r_tickptr = tcg_temp_new_ptr();
2623 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2624 offsetof(CPUState, stick));
2625 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2626 tcg_temp_free_ptr(r_tickptr);
2627 gen_movl_TN_reg(rd, cpu_dst);
2628 }
2629 break;
2630 case 0x19: /* System tick compare */
2631 gen_movl_TN_reg(rd, cpu_stick_cmpr);
2632 break;
2633 case 0x10: /* Performance Control */
2634 case 0x11: /* Performance Instrumentation Counter */
2635 case 0x12: /* Dispatch Control */
2636 case 0x14: /* Softint set, WO */
2637 case 0x15: /* Softint clear, WO */
2638 #endif
2639 default:
2640 goto illegal_insn;
2641 }
2642 #if !defined(CONFIG_USER_ONLY)
2643 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2644 #ifndef TARGET_SPARC64
2645 if (!supervisor(dc))
2646 goto priv_insn;
2647 gen_helper_compute_psr(cpu_env);
2648 dc->cc_op = CC_OP_FLAGS;
2649 gen_helper_rdpsr(cpu_dst, cpu_env);
2650 #else
2651 CHECK_IU_FEATURE(dc, HYPV);
2652 if (!hypervisor(dc))
2653 goto priv_insn;
2654 rs1 = GET_FIELD(insn, 13, 17);
2655 switch (rs1) {
2656 case 0: // hpstate
2657 // gen_op_rdhpstate();
2658 break;
2659 case 1: // htstate
2660 // gen_op_rdhtstate();
2661 break;
2662 case 3: // hintp
2663 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2664 break;
2665 case 5: // htba
2666 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2667 break;
2668 case 6: // hver
2669 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2670 break;
2671 case 31: // hstick_cmpr
2672 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2673 break;
2674 default:
2675 goto illegal_insn;
2676 }
2677 #endif
2678 gen_movl_TN_reg(rd, cpu_dst);
2679 break;
2680 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2681 if (!supervisor(dc))
2682 goto priv_insn;
2683 #ifdef TARGET_SPARC64
2684 rs1 = GET_FIELD(insn, 13, 17);
2685 switch (rs1) {
2686 case 0: // tpc
2687 {
2688 TCGv_ptr r_tsptr;
2689
2690 r_tsptr = tcg_temp_new_ptr();
2691 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2692 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2693 offsetof(trap_state, tpc));
2694 tcg_temp_free_ptr(r_tsptr);
2695 }
2696 break;
2697 case 1: // tnpc
2698 {
2699 TCGv_ptr r_tsptr;
2700
2701 r_tsptr = tcg_temp_new_ptr();
2702 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2703 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2704 offsetof(trap_state, tnpc));
2705 tcg_temp_free_ptr(r_tsptr);
2706 }
2707 break;
2708 case 2: // tstate
2709 {
2710 TCGv_ptr r_tsptr;
2711
2712 r_tsptr = tcg_temp_new_ptr();
2713 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2714 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2715 offsetof(trap_state, tstate));
2716 tcg_temp_free_ptr(r_tsptr);
2717 }
2718 break;
2719 case 3: // tt
2720 {
2721 TCGv_ptr r_tsptr;
2722
2723 r_tsptr = tcg_temp_new_ptr();
2724 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2725 tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
2726 offsetof(trap_state, tt));
2727 tcg_temp_free_ptr(r_tsptr);
2728 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2729 }
2730 break;
2731 case 4: // tick
2732 {
2733 TCGv_ptr r_tickptr;
2734
2735 r_tickptr = tcg_temp_new_ptr();
2736 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2737 offsetof(CPUState, tick));
2738 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2739 gen_movl_TN_reg(rd, cpu_tmp0);
2740 tcg_temp_free_ptr(r_tickptr);
2741 }
2742 break;
2743 case 5: // tba
2744 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2745 break;
2746 case 6: // pstate
2747 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2748 offsetof(CPUSPARCState, pstate));
2749 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2750 break;
2751 case 7: // tl
2752 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2753 offsetof(CPUSPARCState, tl));
2754 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2755 break;
2756 case 8: // pil
2757 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2758 offsetof(CPUSPARCState, psrpil));
2759 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2760 break;
2761 case 9: // cwp
2762 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2763 break;
2764 case 10: // cansave
2765 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2766 offsetof(CPUSPARCState, cansave));
2767 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2768 break;
2769 case 11: // canrestore
2770 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2771 offsetof(CPUSPARCState, canrestore));
2772 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2773 break;
2774 case 12: // cleanwin
2775 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2776 offsetof(CPUSPARCState, cleanwin));
2777 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2778 break;
2779 case 13: // otherwin
2780 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2781 offsetof(CPUSPARCState, otherwin));
2782 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2783 break;
2784 case 14: // wstate
2785 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2786 offsetof(CPUSPARCState, wstate));
2787 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2788 break;
2789 case 16: // UA2005 gl
2790 CHECK_IU_FEATURE(dc, GL);
2791 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2792 offsetof(CPUSPARCState, gl));
2793 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2794 break;
2795 case 26: // UA2005 strand status
2796 CHECK_IU_FEATURE(dc, HYPV);
2797 if (!hypervisor(dc))
2798 goto priv_insn;
2799 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2800 break;
2801 case 31: // ver
2802 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2803 break;
2804 case 15: // fq
2805 default:
2806 goto illegal_insn;
2807 }
2808 #else
2809 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2810 #endif
2811 gen_movl_TN_reg(rd, cpu_tmp0);
2812 break;
2813 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2814 #ifdef TARGET_SPARC64
2815 save_state(dc, cpu_cond);
2816 gen_helper_flushw(cpu_env);
2817 #else
2818 if (!supervisor(dc))
2819 goto priv_insn;
2820 gen_movl_TN_reg(rd, cpu_tbr);
2821 #endif
2822 break;
2823 #endif
2824 } else if (xop == 0x34) { /* FPop1: FPU arithmetic & conversions */
2825 if (gen_trap_ifnofpu(dc, cpu_cond))
2826 goto jmp_insn;
2827 gen_op_clear_ieee_excp_and_FTT();
2828 rs1 = GET_FIELD(insn, 13, 17);
2829 rs2 = GET_FIELD(insn, 27, 31);
2830 xop = GET_FIELD(insn, 18, 26);
2831 save_state(dc, cpu_cond);
2832 switch (xop) {
2833 case 0x1: /* fmovs */
2834 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2835 gen_store_fpr_F(dc, rd, cpu_src1_32);
2836 break;
2837 case 0x5: /* fnegs */
2838 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2839 break;
2840 case 0x9: /* fabss */
2841 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2842 break;
2843 case 0x29: /* fsqrts */
2844 CHECK_FPU_FEATURE(dc, FSQRT);
2845 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2846 break;
2847 case 0x2a: /* fsqrtd */
2848 CHECK_FPU_FEATURE(dc, FSQRT);
2849 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2850 break;
2851 case 0x2b: /* fsqrtq */
2852 CHECK_FPU_FEATURE(dc, FLOAT128);
2853 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2854 break;
2855 case 0x41: /* fadds */
2856 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2857 break;
2858 case 0x42: /* faddd */
2859 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2860 break;
2861 case 0x43: /* faddq */
2862 CHECK_FPU_FEATURE(dc, FLOAT128);
2863 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2864 break;
2865 case 0x45: /* fsubs */
2866 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2867 break;
2868 case 0x46: /* fsubd */
2869 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
2870 break;
2871 case 0x47: /* fsubq */
2872 CHECK_FPU_FEATURE(dc, FLOAT128);
2873 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
2874 break;
2875 case 0x49: /* fmuls */
2876 CHECK_FPU_FEATURE(dc, FMUL);
2877 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
2878 break;
2879 case 0x4a: /* fmuld */
2880 CHECK_FPU_FEATURE(dc, FMUL);
2881 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
2882 break;
2883 case 0x4b: /* fmulq */
2884 CHECK_FPU_FEATURE(dc, FLOAT128);
2885 CHECK_FPU_FEATURE(dc, FMUL);
2886 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
2887 break;
2888 case 0x4d: /* fdivs */
2889 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
2890 break;
2891 case 0x4e: /* fdivd */
2892 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
2893 break;
2894 case 0x4f: /* fdivq */
2895 CHECK_FPU_FEATURE(dc, FLOAT128);
2896 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
2897 break;
2898 case 0x69: /* fsmuld */
2899 CHECK_FPU_FEATURE(dc, FSMULD);
2900 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
2901 break;
2902 case 0x6e: /* fdmulq */
2903 CHECK_FPU_FEATURE(dc, FLOAT128);
2904 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
2905 break;
2906 case 0xc4: /* fitos */
2907 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
2908 break;
2909 case 0xc6: /* fdtos */
2910 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
2911 break;
2912 case 0xc7: /* fqtos */
2913 CHECK_FPU_FEATURE(dc, FLOAT128);
2914 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
2915 break;
2916 case 0xc8: /* fitod */
2917 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
2918 break;
2919 case 0xc9: /* fstod */
2920 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
2921 break;
2922 case 0xcb: /* fqtod */
2923 CHECK_FPU_FEATURE(dc, FLOAT128);
2924 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
2925 break;
2926 case 0xcc: /* fitoq */
2927 CHECK_FPU_FEATURE(dc, FLOAT128);
2928 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
2929 break;
2930 case 0xcd: /* fstoq */
2931 CHECK_FPU_FEATURE(dc, FLOAT128);
2932 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
2933 break;
2934 case 0xce: /* fdtoq */
2935 CHECK_FPU_FEATURE(dc, FLOAT128);
2936 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
2937 break;
2938 case 0xd1: /* fstoi */
2939 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
2940 break;
2941 case 0xd2: /* fdtoi */
2942 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
2943 break;
2944 case 0xd3: /* fqtoi */
2945 CHECK_FPU_FEATURE(dc, FLOAT128);
2946 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
2947 break;
2948 #ifdef TARGET_SPARC64
2949 case 0x2: /* V9 fmovd */
2950 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
2951 gen_store_fpr_D(dc, rd, cpu_src1_64);
2952 break;
2953 case 0x3: /* V9 fmovq */
2954 CHECK_FPU_FEATURE(dc, FLOAT128);
2955 gen_move_Q(rd, rs2);
2956 break;
2957 case 0x6: /* V9 fnegd */
2958 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
2959 break;
2960 case 0x7: /* V9 fnegq */
2961 CHECK_FPU_FEATURE(dc, FLOAT128);
2962 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
2963 break;
2964 case 0xa: /* V9 fabsd */
2965 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
2966 break;
2967 case 0xb: /* V9 fabsq */
2968 CHECK_FPU_FEATURE(dc, FLOAT128);
2969 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
2970 break;
2971 case 0x81: /* V9 fstox */
2972 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
2973 break;
2974 case 0x82: /* V9 fdtox */
2975 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
2976 break;
2977 case 0x83: /* V9 fqtox */
2978 CHECK_FPU_FEATURE(dc, FLOAT128);
2979 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
2980 break;
2981 case 0x84: /* V9 fxtos */
2982 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
2983 break;
2984 case 0x88: /* V9 fxtod */
2985 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
2986 break;
2987 case 0x8c: /* V9 fxtoq */
2988 CHECK_FPU_FEATURE(dc, FLOAT128);
2989 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
2990 break;
2991 #endif
2992 default:
2993 goto illegal_insn;
2994 }
2995 } else if (xop == 0x35) { /* FPop2: FP compares & conditional moves */
2996 #ifdef TARGET_SPARC64
2997 int cond;
2998 #endif
2999 if (gen_trap_ifnofpu(dc, cpu_cond))
3000 goto jmp_insn;
3001 gen_op_clear_ieee_excp_and_FTT();
3002 rs1 = GET_FIELD(insn, 13, 17);
3003 rs2 = GET_FIELD(insn, 27, 31);
3004 xop = GET_FIELD(insn, 18, 26);
3005 save_state(dc, cpu_cond);
3006 #ifdef TARGET_SPARC64
3007 if ((xop & 0x11f) == 0x005) { // V9 fmovsr
3008 int l1;
3009
3010 l1 = gen_new_label();
3011 cond = GET_FIELD_SP(insn, 14, 17);
3012 cpu_src1 = get_src1(insn, cpu_src1);
3013 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3014 0, l1);
3015 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3016 gen_store_fpr_F(dc, rd, cpu_src1_32);
3017 gen_set_label(l1);
3018 break;
3019 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3020 int l1;
3021
3022 l1 = gen_new_label();
3023 cond = GET_FIELD_SP(insn, 14, 17);
3024 cpu_src1 = get_src1(insn, cpu_src1);
3025 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3026 0, l1);
3027 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3028 gen_store_fpr_D(dc, rd, cpu_src1_64);
3029 gen_set_label(l1);
3030 break;
3031 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3032 int l1;
3033
3034 CHECK_FPU_FEATURE(dc, FLOAT128);
3035 l1 = gen_new_label();
3036 cond = GET_FIELD_SP(insn, 14, 17);
3037 cpu_src1 = get_src1(insn, cpu_src1);
3038 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3039 0, l1);
3040 gen_move_Q(rd, rs2);
3041 gen_set_label(l1);
3042 break;
3043 }
3044 #endif
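/* The FMOVcc expansions below share the branch-over idiom used by the
   fmovr cases above: evaluate the condition into a temporary, branch
   past the register copy when it is zero, and fall through to perform
   the move otherwise. */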
3045 switch (xop) {
3046 #ifdef TARGET_SPARC64
3047 #define FMOVSCC(fcc) \
3048 { \
3049 TCGv r_cond; \
3050 int l1; \
3051 \
3052 l1 = gen_new_label(); \
3053 r_cond = tcg_temp_new(); \
3054 cond = GET_FIELD_SP(insn, 14, 17); \
3055 gen_fcond(r_cond, fcc, cond); \
3056 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
3057 0, l1); \
3058 cpu_src1_32 = gen_load_fpr_F(dc, rs2); \
3059 gen_store_fpr_F(dc, rd, cpu_src1_32); \
3060 gen_set_label(l1); \
3061 tcg_temp_free(r_cond); \
3062 }
3063 #define FMOVDCC(fcc) \
3064 { \
3065 TCGv r_cond; \
3066 int l1; \
3067 \
3068 l1 = gen_new_label(); \
3069 r_cond = tcg_temp_new(); \
3070 cond = GET_FIELD_SP(insn, 14, 17); \
3071 gen_fcond(r_cond, fcc, cond); \
3072 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
3073 0, l1); \
3074 cpu_src1_64 = gen_load_fpr_D(dc, rs2); \
3075 gen_store_fpr_D(dc, rd, cpu_src1_64); \
3076 gen_set_label(l1); \
3077 tcg_temp_free(r_cond); \
3078 }
3079 #define FMOVQCC(fcc) \
3080 { \
3081 TCGv r_cond; \
3082 int l1; \
3083 \
3084 l1 = gen_new_label(); \
3085 r_cond = tcg_temp_new(); \
3086 cond = GET_FIELD_SP(insn, 14, 17); \
3087 gen_fcond(r_cond, fcc, cond); \
3088 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
3089 0, l1); \
3090 gen_move_Q(rd, rs2); \
3091 gen_set_label(l1); \
3092 tcg_temp_free(r_cond); \
3093 }
3094 case 0x001: /* V9 fmovscc %fcc0 */
3095 FMOVSCC(0);
3096 break;
3097 case 0x002: /* V9 fmovdcc %fcc0 */
3098 FMOVDCC(0);
3099 break;
3100 case 0x003: /* V9 fmovqcc %fcc0 */
3101 CHECK_FPU_FEATURE(dc, FLOAT128);
3102 FMOVQCC(0);
3103 break;
3104 case 0x041: /* V9 fmovscc %fcc1 */
3105 FMOVSCC(1);
3106 break;
3107 case 0x042: /* V9 fmovdcc %fcc1 */
3108 FMOVDCC(1);
3109 break;
3110 case 0x043: /* V9 fmovqcc %fcc1 */
3111 CHECK_FPU_FEATURE(dc, FLOAT128);
3112 FMOVQCC(1);
3113 break;
3114 case 0x081: /* V9 fmovscc %fcc2 */
3115 FMOVSCC(2);
3116 break;
3117 case 0x082: /* V9 fmovdcc %fcc2 */
3118 FMOVDCC(2);
3119 break;
3120 case 0x083: /* V9 fmovqcc %fcc2 */
3121 CHECK_FPU_FEATURE(dc, FLOAT128);
3122 FMOVQCC(2);
3123 break;
3124 case 0x0c1: /* V9 fmovscc %fcc3 */
3125 FMOVSCC(3);
3126 break;
3127 case 0x0c2: /* V9 fmovdcc %fcc3 */
3128 FMOVDCC(3);
3129 break;
3130 case 0x0c3: /* V9 fmovqcc %fcc3 */
3131 CHECK_FPU_FEATURE(dc, FLOAT128);
3132 FMOVQCC(3);
3133 break;
3134 #undef FMOVSCC
3135 #undef FMOVDCC
3136 #undef FMOVQCC
3137 #define FMOVSCC(icc) \
3138 { \
3139 TCGv r_cond; \
3140 int l1; \
3141 \
3142 l1 = gen_new_label(); \
3143 r_cond = tcg_temp_new(); \
3144 cond = GET_FIELD_SP(insn, 14, 17); \
3145 gen_cond(r_cond, icc, cond, dc); \
3146 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
3147 0, l1); \
3148 cpu_src1_32 = gen_load_fpr_F(dc, rs2); \
3149 gen_store_fpr_F(dc, rd, cpu_src1_32); \
3150 gen_set_label(l1); \
3151 tcg_temp_free(r_cond); \
3152 }
3153 #define FMOVDCC(icc) \
3154 { \
3155 TCGv r_cond; \
3156 int l1; \
3157 \
3158 l1 = gen_new_label(); \
3159 r_cond = tcg_temp_new(); \
3160 cond = GET_FIELD_SP(insn, 14, 17); \
3161 gen_cond(r_cond, icc, cond, dc); \
3162 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
3163 0, l1); \
3164 cpu_src1_64 = gen_load_fpr_D(dc, rs2); \
3165 gen_store_fpr_D(dc, rd, cpu_src1_64); \
3166 gen_update_fprs_dirty(DFPREG(rd)); \
3167 gen_set_label(l1); \
3168 tcg_temp_free(r_cond); \
3169 }
3170 #define FMOVQCC(icc) \
3171 { \
3172 TCGv r_cond; \
3173 int l1; \
3174 \
3175 l1 = gen_new_label(); \
3176 r_cond = tcg_temp_new(); \
3177 cond = GET_FIELD_SP(insn, 14, 17); \
3178 gen_cond(r_cond, icc, cond, dc); \
3179 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \
3180 0, l1); \
3181 gen_move_Q(rd, rs2); \
3182 gen_set_label(l1); \
3183 tcg_temp_free(r_cond); \
3184 }
3185
3186 case 0x101: /* V9 fmovscc %icc */
3187 FMOVSCC(0);
3188 break;
3189 case 0x102: /* V9 fmovdcc %icc */
3190 FMOVDCC(0);
3191 break;
3192 case 0x103: /* V9 fmovqcc %icc */
3193 CHECK_FPU_FEATURE(dc, FLOAT128);
3194 FMOVQCC(0);
3195 break;
3196 case 0x181: /* V9 fmovscc %xcc */
3197 FMOVSCC(1);
3198 break;
3199 case 0x182: /* V9 fmovdcc %xcc */
3200 FMOVDCC(1);
3201 break;
3202 case 0x183: /* V9 fmovqcc %xcc */
3203 CHECK_FPU_FEATURE(dc, FLOAT128);
3204 FMOVQCC(1);
3205 break;
3206 #undef FMOVSCC
3207 #undef FMOVDCC
3208 #undef FMOVQCC
3209 #endif
3210 case 0x51: /* fcmps, V9 %fcc */
3211 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3212 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3213 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3214 break;
3215 case 0x52: /* fcmpd, V9 %fcc */
3216 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3217 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3218 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3219 break;
3220 case 0x53: /* fcmpq, V9 %fcc */
3221 CHECK_FPU_FEATURE(dc, FLOAT128);
3222 gen_op_load_fpr_QT0(QFPREG(rs1));
3223 gen_op_load_fpr_QT1(QFPREG(rs2));
3224 gen_op_fcmpq(rd & 3);
3225 break;
3226 case 0x55: /* fcmpes, V9 %fcc */
3227 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3228 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3229 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3230 break;