/*
 * Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
 * Copyright (C) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
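/*
 * Worked example (illustrative comment, not in the original source):
 * GET_FIELD numbers bits from the MSB, so FROM=3, TO=6 selects machine
 * bits [28:25].  Expanding the macro for those arguments gives
 *
 *     GET_FIELD(insn, 3, 6) == (insn >> 25) & 0xf
 *
 * which is the 4-bit "cond" field that do_branch() below extracts from a
 * Bicc instruction.  GET_FIELD_SP instead uses LSB-first numbering, so
 * GET_FIELD_SP(insn, 25, 27) selects machine bits [27:25].
 */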
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
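/*
 * Illustrative note (not in the original source): on SPARC64 the 5-bit
 * rd field encodes double-precision register numbers with bit 5 of the
 * register number folded into bit 0 of the field.  DFPREG() undoes that
 * folding, e.g.
 *
 *     DFPREG(0x03) == ((1 << 5) | 0x02) == 0x22   ->  %f34
 *
 * while on 32-bit SPARC the low bit is simply masked off.
 */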
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1 << 13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    TCGv_i32 ret = get_temp_i32(dc);
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
#endif
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = (TCGv_i64)v;
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
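/*
 * Illustrative note (not in the original source): with PSTATE.AM set, a
 * SPARC64 CPU truncates every effective address to 32 bits.  The memory
 * access generators below therefore run computed addresses through
 * gen_address_mask(), which emits the AND above only when the
 * translation-time AM_CHECK() says it could matter.
 */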
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(NULL, 0);
    }
}
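/*
 * Illustrative note (not in the original source): tcg_gen_goto_tb() plus
 * tcg_gen_exit_tb(tb, tb_num) allows the generated code to be patched
 * later so this TB jumps straight into its successor without returning
 * to the main loop.  tcg_gen_exit_tb(NULL, 0) is the unchained variant:
 * control always returns to the loop, which looks up the next TB by
 * the cpu_pc/cpu_npc values stored just above.
 */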
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
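/*
 * Worked example (illustrative comment, not in the original source):
 * for 32-bit unsigned addition, carry-out is equivalent to the result
 * wrapping below one of the operands.  With cc_dst = cc_src + cc_src2:
 *
 *     0xffffffff + 0x00000001 -> dst = 0x00000000, dst < src -> carry 1
 *     0x7fffffff + 0x00000001 -> dst = 0x80000000, dst >= src -> carry 0
 *
 * so the setcond above recovers the C flag without materializing PSR.
 */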
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
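/*
 * Illustrative note (not in the original source): MULScc performs one
 * step of the V8 shift-and-add multiply.  If the LSB of %y is 0, the
 * addend (rs2) is forced to zero by the movcond above; %y then shifts
 * right one bit taking the LSB of rs1, and rs1 shifts right one bit
 * taking (N xor V) into its MSB before the final add.  Executing the
 * instruction 32 times (plus a fixup step) yields the full product,
 * with %y accumulating the multiplier bits already consumed.
 */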
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
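/*
 * Illustrative note (not in the original source): V8 UMUL/SMUL produce a
 * 64-bit product from 32-bit operands; the low word goes to rd and the
 * high word to %y.  E.g. umul of 0x80000000 by 2 leaves rd = 0 and
 * %y = 1, which the shri of the 64-bit product above reproduces.
 */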
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
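/*
 * Illustrative note (not in the original source): each fcc field is the
 * two-bit pair {FCC1, FCC0} encoding =, <, > or unordered as listed
 * above.  fcc_offset selects which fcc field of the FSR is examined
 * (%fcc0 or, on SPARC64, %fcc1..%fcc3); the gen_op_eval_fb* helpers
 * below combine the two bits into a boolean for each FBcc condition.
 */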
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
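/*
 * Illustrative note (not in the original source): for a conditional
 * branch without the annul bit, the delay slot is always executed, so
 * instead of branching here we record both candidate next-PC values in
 * jump_pc[] and set npc to the magic JUMP_PC.  The choice is resolved
 * later, either by gen_generic_branch() emitting a movcond on cpu_cond,
 * or statically once the delay-slot instruction has been translated.
 */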
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
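/*
 * Illustrative note (not in the original source): DisasCompare lets a
 * consumer either test c1 <cond> c2 directly (e.g. with brcond or
 * movcond) or, when is_bool is set, treat c1 as a precomputed 0/1
 * value.  The g1/g2 flags mark operands that alias global TCG values
 * and therefore must not be freed by free_compare().
 */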
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }

    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_BLK_COMMIT_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_BLK_COMMIT_P:
            break;
        }

        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
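/*
 * Worked example (illustrative comment, not in the original source):
 * "lda [%o0] ASI_USERDATA, %o1" on a LEON (pre-v9) target carries the
 * ASI in the instruction, so get_asi() returns GET_ASI_DIRECT with
 * mem_idx = MMU_USER_IDX and the caller emits a plain qemu_ld through
 * the user-mode MMU index; exotic ASIs instead fall back to
 * GET_ASI_HELPER and the out-of-line ld/st_asi helpers.
 */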
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
#endif
        /* fall through */
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_const_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }

            tcg_temp_free(saddr);
            tcg_temp_free(daddr);
            tcg_temp_free(four);
            tcg_temp_free_i32(tmp);
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                        int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop);
        gen_store_gpr(dc, rd, oldv);
        tcg_temp_free(oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_UB);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_ldstub(dc, dst, addr, da.mem_idx);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(cpu_env);
        } else {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);

            s64 = tcg_const_i64(0xff);
            gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
            tcg_temp_free_i64(s64);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            tcg_gen_trunc_i64_tl(dst, t64);
            tcg_temp_free_i64(t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
#ifdef TARGET_SPARC64
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
    TCGv_i32 d32;
    TCGv_i64 d64;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
            gen_store_fpr_F(dc, rd, d32);
            break;
        case 8:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            tcg_temp_free_i64(d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            MemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_const_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
            tcg_temp_free(eight);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case 4:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                tcg_temp_free_i64(d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case 8:
                gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
                break;
            case 16:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_addi_tl(addr, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                tcg_temp_free_i64(d64);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_stf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
    TCGv_i32 d32;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
            break;
        case 8:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_16);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            MemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_const_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
            tcg_temp_free(eight);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv_i64 hi = gen_dest_gpr(dc, rd);
    TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
            tcg_temp_free_i64(tmp);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
            tcg_temp_free_i32(r_asi);
            tcg_temp_free_i32(r_mop);

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
            tcg_temp_free_i64(tmp);
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
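/*
 * Worked example (illustrative comment, not in the original source):
 * ldda with a little-endian ASI on memory bytes 01 23 45 67 89 ab cd ef
 * must yield rd = 0x67452301 and rd+1 = 0xefcdab89: each 32-bit half is
 * byte swapped independently.  The single 64-bit LE load above swaps
 * all eight bytes at once, so extr32_i64 writes the halves back in the
 * opposite order to compensate.
 */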
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
            tcg_temp_free_i64(t64);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
            tcg_temp_free_i64(t64);
        }
        break;
    }
}
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv oldv;
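    /* CASXA compares CMPV with the doubleword at [ADDR]; on a match the
       value of register RD replaces the memory word, and in either case
       the old memory value lands in RD.  That is exactly the semantic of
       tcg_gen_atomic_cmpxchg_tl used below. */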
    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop);
        gen_store_gpr(dc, rd, oldv);
        tcg_temp_free(oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
#elif !defined(CONFIG_USER_ONLY)
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEUQ);

    switch (da.type) {
    case GET_ASI_EXCP:
        tcg_temp_free_i64(t64);
        return;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UQ);

            save_state(dc);
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
        break;
    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_const_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }

            tcg_temp_free(d_addr);
            tcg_temp_free(eight);
        }
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UQ);

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_temp_free_i64(t64);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}
static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
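        /* Format 3 with i=1: bits 12..0 hold a signed 13-bit immediate,
           as in "add %g1, 0x10, %g2".  With i=0, bits 4..0 select rs2
           instead, as in "add %g1, %g3, %g2". */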
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else {      /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
}
#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
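    /* Worked example for width 8: an input whose low bits are 2 gives
       index = (2 & 0x7) << 3 = 16, and with tabl = 0x0103070f1f3f7fffULL
       below, (tabl >> 16) & 0xff = 0x3f, the edge mask covering the six
       bytes from offset 2 to the end of the 8-byte group. */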
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;

    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;

    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;

    default:
        abort();
    }
    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);
    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
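/* FALIGNDATA consumes the GSR.align field that gen_alignaddr latched
   above: the 3-bit byte offset becomes a bit shift of 0..56, selecting
   the 8-byte window that starts at that offset within the s1:s2 pair. */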
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
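    /* The right shift wanted is 64 - shift.  Since shift is a multiple
       of 8 no larger than 56, its low bits are clear and shift ^ 63
       equals 63 - shift; shifting by that and then by one more gives
       the same result while keeping each TCG shift count below 64. */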
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);
    switch (opc) {
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
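                /* (A nop assembles to 0x01000000, i.e. "sethi 0, %g0",
                   so skipping the write for rd == 0 elides nops.) */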
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
    case 1:                     /* CALL */
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);
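            /* CALL leaves its own address in %o7 (r15) for the return
               and only sets npc below, so the delay-slot insn at the
               current npc still executes before control transfers. */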
            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;
            if (xop == 0x3a) {  /* generate trap */
                int cond = GET_FIELD(insn, 3, 6);
                TCGv_i32 trap;
                TCGLabel *l1 = NULL;
                int mask;

                if (cond == 0) {
                    /* Trap never.  */
                    break;
                }

                save_state(dc);

                if (cond != 8) {
                    /* Conditional trap.  */
                    DisasCompare cmp;
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    if (cc == 0) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    } else {
                        goto illegal_insn;
                    }
#else
                    gen_compare(&cmp, 0, cond, dc);
#endif
                    l1 = gen_new_label();
                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                                      cmp.c1, cmp.c2, l1);
                    free_compare(&cmp);
                }
                mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                        ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
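                /* Tcc trap numbers are taken modulo 128 on V8 (mask 0x7f);
                   UA2005 allows the full 8-bit range when the hypervisor
                   feature is present and we are in supervisor mode. */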
                /* Don't use the normal temporaries, as they may well have
                   gone out of scope with the branch above.  While we're
                   doing that we might as well pre-truncate to 32-bit.  */
                trap = tcg_temp_new_i32();

                rs1 = GET_FIELD_SP(insn, 14, 18);
                if (IS_IMM) {
                    rs2 = GET_FIELD_SP(insn, 0, 7);
                    if (rs1 == 0) {
                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                        /* Signal that the trap value is fully constant.  */
                        mask = 0;
                    } else {
                        TCGv t1 = gen_load_gpr(dc, rs1);
                        tcg_gen_trunc_tl_i32(trap, t1);
                        tcg_gen_addi_i32(trap, trap, rs2);
                    }
                } else {
                    TCGv t1, t2;
                    rs2 = GET_FIELD_SP(insn, 0, 4);
                    t1 = gen_load_gpr(dc, rs1);
                    t2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(t1, t1, t2);
                    tcg_gen_trunc_tl_i32(trap, t1);
                }
                if (mask != 0) {
                    tcg_gen_andi_i32(trap, trap, mask);
                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);
                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->base.is_jmp = DISAS_NORETURN;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    /* Read Asr17 */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_movi_tl(cpu_dst, dc->asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            /* I/O operations in icount mode must end the TB */
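                            /* (under icount the tick value depends on the
                               executed-instruction count, so the TB must
                               end here to keep that count exact) */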
                            dc->base.is_jmp = DISAS_EXIT;
                        }
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ld32s_tl(cpu_dst, cpu_env,
                                     offsetof(CPUSPARCState, softint));
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            /* I/O operations in icount mode must end the TB */
                            dc->base.is_jmp = DISAS_EXIT;
                        }
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x1a: /* UltraSPARC-T1 Strand status */
                    /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
                     * this ASR as impl. dep
                     */
                    CHECK_IU_FEATURE(dc, HYPV);
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, 1UL);
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    tcg_gen_ld_i64(cpu_dst, cpu_env,
                                   offsetof(CPUSPARCState, hpstate));
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_tick_get_count(cpu_tmp0, cpu_env,
                                                  r_tickptr, r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            /* I/O operations in icount mode must end the TB */
                            dc->base.is_jmp = DISAS_EXIT;
                        }
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
                break;
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

                switch (xop) {
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
);