/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
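/*
 * Illustration: because every SPARC branch has a delay slot, the
 * translator tracks two program counters.  For a straight-line insn at
 * 0x1000, pc = 0x1000 and npc = 0x1004; while the delay-slot insn of a
 * taken branch to 0x2000 executes, pc = 0x1004 and npc = 0x2000.
 * JUMP_PC marks the conditional case in which npc is known to be one of
 * jump_pc[0] or jump_pc[1], resolved at runtime from cpu_cond.
 */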
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
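/*
 * Worked example: the rd field lives in instruction bits 29..25, which
 * is GET_FIELD(insn, 2, 6) in the non-native numbering above: shift
 * right by 31 - 6 = 25, then mask with (1 << 5) - 1, i.e.
 * (insn >> 25) & 0x1f.  GET_FIELD_SP(insn, 25, 29) is the same
 * extraction written with the manuals' bit numbers.
 */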
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
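/*
 * Example: a 22-bit branch displacement of all ones,
 * sign_extend(0x3fffff, 22), shifts left by 32 - 22 = 10 to 0xfffffc00
 * and arithmetic-shifts back, propagating bit 21 and yielding -1.
 */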
#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
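/*
 * The value passed to tcg_gen_exit_tb() above is the TB pointer plus the
 * jump-slot index (0 or 1); the execution loop uses the non-zero low
 * bits to patch the corresponding direct-jump slot of the exiting TB,
 * chaining the two blocks so later executions skip the TB lookup.
 */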
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
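/*
 * Rationale: after a 32-bit "dst = src1 + src2", unsigned overflow
 * occurred iff dst < src1.  E.g. 0xffffffff + 2 wraps to 1, and
 * 1 < 0xffffffff, so the setcond above yields carry = 1; for 1 + 2 = 3,
 * 3 < 1 is false and carry = 0.
 */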
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
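/*
 * Example: SPARC UMUL/SMUL produce a 64-bit product whose high half goes
 * to %y.  umul with src1 = src2 = 0x80000000 gives the product
 * 0x4000000000000000: dst receives the product (full width on a 64-bit
 * target, low 32 bits on a 32-bit one) and cpu_y receives 0x40000000.
 */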
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
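/*
 * The movcond above computes npc = (cpu_cond != 0 ? jump_pc[0] : jump_pc[1]),
 * materializing whichever of the two statically known successors applies
 * once the condition value is available at runtime.
 */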
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
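/*
 * Note: with the immediate bit set, the effective ASI comes from the
 * %asi register at runtime (hence the TCG copy of cpu_asi); otherwise
 * the 8-bit ASI is a translation-time constant taken from instruction
 * bits 19..26 in the non-native GET_FIELD numbering.
 */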
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    t = gen_dest_gpr(dc, rd | 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd | 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else {      /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
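    /* Worked example for width 8, left = true: imask = 7, shift = 3,
       omask = 0xff.  An address with low bits 0 indexes the least
       significant byte of TABL, giving the mask 0xff (all eight bytes),
       while low bits 7 give (TABL >> 56) & 0xff = 0x80.  */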
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;

    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;

    default:
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
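/*
 * Usage note: gen_alignaddr() deposits the low three bits of s1 + s2
 * into GSR.align, and gen_faligndata() then shifts the s1:s2 byte pair
 * by 8 * GSR.align bits and ORs the halves, which is how a VIS
 * alignaddr/faligndata sequence assembles an unaligned 8-byte datum
 * from two aligned loads.
 */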
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
2483 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2485 unsigned int opc
, rs1
, rs2
, rd
;
2486 TCGv cpu_src1
, cpu_src2
;
2487 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2488 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2491 opc
= GET_FIELD(insn
, 0, 1);
2492 rd
= GET_FIELD(insn
, 2, 6);
2495 case 0: /* branches/sethi */
2497 unsigned int xop
= GET_FIELD(insn
, 7, 9);
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
    case 1:                     /* call */
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);

            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;
== 0x3a) { /* generate trap */
2604 int cond
= GET_FIELD(insn
, 3, 6);
2606 TCGLabel
*l1
= NULL
;
2617 /* Conditional trap. */
2619 #ifdef TARGET_SPARC64
2621 int cc
= GET_FIELD_SP(insn
, 11, 12);
2623 gen_compare(&cmp
, 0, cond
, dc
);
2624 } else if (cc
== 2) {
2625 gen_compare(&cmp
, 1, cond
, dc
);
2630 gen_compare(&cmp
, 0, cond
, dc
);
2632 l1
= gen_new_label();
2633 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2634 cmp
.c1
, cmp
.c2
, l1
);
2638 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2639 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
                /* Don't use the normal temporaries, as they may well have
                   gone out of scope with the branch above.  While we're
                   doing that we might as well pre-truncate to 32-bit.  */
                trap = tcg_temp_new_i32();

                rs1 = GET_FIELD_SP(insn, 14, 18);
                if (IS_IMM) {
                    rs2 = GET_FIELD_SP(insn, 0, 6);
                    if (rs1 == 0) {
                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                        /* Signal that the trap value is fully constant.  */
                        mask = 0;
                    } else {
                        TCGv t1 = gen_load_gpr(dc, rs1);
                        tcg_gen_trunc_tl_i32(trap, t1);
                        tcg_gen_addi_i32(trap, trap, rs2);
                    }
                } else {
                    TCGv t1, t2;
                    rs2 = GET_FIELD_SP(insn, 0, 4);
                    t1 = gen_load_gpr(dc, rs1);
                    t2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(t1, t1, t2);
                    tcg_gen_trunc_tl_i32(trap, t1);
                }
                if (mask != 0) {
                    tcg_gen_andi_i32(trap, trap, mask);
                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);

                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->is_br = 1;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0x00:          /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    /* Read Asr17 */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
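                        /* The %tick timer is reached via a pointer stored
                           in CPUSPARCState; r_const carries dc->mem_idx so
                           the helper can reject nonprivileged reads when
                           the counter's NPT bit is set. */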
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
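                        /* With PSTATE.AM set the CPU masks addresses to
                           32 bits, so rdpc must return a truncated PC. */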
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
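                /* Pre-V9 this encoding is RDPSR, supervisor only.  On
                   UA2005 CPUs it is the hyperprivileged RDHPR instead,
                   with rs1 selecting the hyper register. */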
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
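                /* The trap-state registers (tpc/tnpc/tstate/tt) are banked
                   per trap level; gen_load_trap_state_at_tl points at the
                   entry for the current TL. */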
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_tmp0, cpu_env,
                                                  r_tickptr, r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
                break;
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
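                /* From here on xop holds the 9-bit opf field, which names
                   the concrete FPop1 operation. */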
                switch (xop) {
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    break;
                case 0x42: /* faddd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    break;
                case 0x43: /* faddq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    break;
                case 0x45: /* fsubs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    break;
                case 0x46: /* fsubd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    break;
                case 0x47: /* fsubq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    break;
                case 0x49: /* fmuls */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    break;
                case 0x4a: /* fmuld */
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    break;
                case 0x4b: /* fmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    CHECK_FPU_FEATURE(dc, FMUL);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    break;
                case 0x4d: /* fdivs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    break;
                case 0x4e: /* fdivd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    break;
                case 0x4f: /* fdivq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    break;
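                /* fsmuld and fdmulq are widening multiplies: the product is
                   computed at twice the source precision (single*single ->
                   double, double*double -> quad), so no rounding is lost. */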
                case 0x69: /* fsmuld */
                    CHECK_FPU_FEATURE(dc, FSMULD);
                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    break;
                case 0x6e: /* fdmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    break;
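                /* Conversions: fitos/fitod/fitoq take a 32-bit integer
                   source; fstoi/fdtoi/fqtoi round toward zero into a
                   32-bit integer destination. */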
                case 0xc4: /* fitos */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
                    break;
                case 0xc6: /* fdtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    break;
                case 0xc7: /* fqtos */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    break;
                case 0xc8: /* fitod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    break;
                case 0xc9: /* fstod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    break;
                case 0xcb: /* fqtod */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    break;
                case 0xcc: /* fitoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    break;
                case 0xcd: /* fstoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    break;
                case 0xce: /* fdtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    break;
                case 0xd1: /* fstoi */
                    gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
                    break;
                case 0xd2: /* fdtoi */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    break;
                case 0xd3: /* fqtoi */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 fmovd */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x3: /* V9 fmovq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_move_Q(rd, rs2);
                    break;
                case 0x6: /* V9 fnegd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
                    break;
                case 0x7: /* V9 fnegq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    break;
                case 0xa: /* V9 fabsd */
                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
                    break;
                case 0xb: /* V9 fabsq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    break;
                case 0x81: /* V9 fstox */
                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    break;
                case 0x82: /* V9 fdtox */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    break;
                case 0x83: /* V9 fqtox */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    break;
                case 0x84: /* V9 fxtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    break;
                case 0x88: /* V9 fxtod */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    break;
                case 0x8c: /* V9 fxtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
#define FMOVR(sz)                                                  \
                do {                                               \
                    DisasCompare cmp;                              \
                    cond = GET_FIELD_SP(insn, 10, 12);             \
                    cpu_src1 = get_src1(dc, insn);                 \
                    gen_compare_reg(&cmp, cond, cpu_src1);         \
                    gen_fmov##sz(dc, &cmp, rd, rs2);               \
                    free_compare(&cmp);                            \
                } while (0)
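                /* FMOVR: conditionally move an FP register based on the
                   contents of integer register rs1 (tested against zero
                   by gen_compare_reg). */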
                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                    FMOVR(s);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    FMOVR(d);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVR(q);
                    break;
                }
#undef FMOVR
#endif
                switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz)                                                 \
                do {                                                    \
                    DisasCompare cmp;                                   \
                    cond = GET_FIELD_SP(insn, 14, 17);                  \
                    gen_fcompare(&cmp, fcc, cond);                      \
                    gen_fmov##sz(dc, &cmp, rd, rs2);                    \
                    free_compare(&cmp);                                 \
                } while (0)
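                /* FMOVcc on a floating-point condition code: fcc selects
                   one of the four V9 %fcc fields. */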
                case 0x001: /* V9 fmovscc %fcc0 */
                    FMOVCC(0, s);
                    break;
                case 0x002: /* V9 fmovdcc %fcc0 */
                    FMOVCC(0, d);
                    break;
                case 0x003: /* V9 fmovqcc %fcc0 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x041: /* V9 fmovscc %fcc1 */
                    FMOVCC(1, s);
                    break;
                case 0x042: /* V9 fmovdcc %fcc1 */
                    FMOVCC(1, d);
                    break;
                case 0x043: /* V9 fmovqcc %fcc1 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
                case 0x081: /* V9 fmovscc %fcc2 */
                    FMOVCC(2, s);
                    break;
                case 0x082: /* V9 fmovdcc %fcc2 */
                    FMOVCC(2, d);
                    break;
                case 0x083: /* V9 fmovqcc %fcc2 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(2, q);
                    break;
                case 0x0c1: /* V9 fmovscc %fcc3 */
                    FMOVCC(3, s);
                    break;
                case 0x0c2: /* V9 fmovdcc %fcc3 */
                    FMOVCC(3, d);
                    break;
                case 0x0c3: /* V9 fmovqcc %fcc3 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(3, q);
                    break;
#undef FMOVCC
#define FMOVCC(xcc, sz)                                                 \
                do {                                                    \
                    DisasCompare cmp;                                   \
                    cond = GET_FIELD_SP(insn, 14, 17);                  \
                    gen_compare(&cmp, xcc, cond, dc);                   \
                    gen_fmov##sz(dc, &cmp, rd, rs2);                    \
                    free_compare(&cmp);                                 \
                } while (0)
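                /* Integer-cc variant: the xcc argument matches gen_compare's
                   selector, 0 for %icc and 1 for %xcc. */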
                case 0x101: /* V9 fmovscc %icc */
                    FMOVCC(0, s);
                    break;
                case 0x102: /* V9 fmovdcc %icc */
                    FMOVCC(0, d);
                    break;
                case 0x103: /* V9 fmovqcc %icc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x181: /* V9 fmovscc %xcc */
                    FMOVCC(1, s);
                    break;