/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "qemu/atomic128.h"
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instructions classes and implementations flags */
/* #define PPC_DEBUG_DISAS */
/* #define DO_PPC_STATISTICS */

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers                                                  */

/* global register indexes */
static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
static TCGv cpu_reserve_val;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "exec/gen-icount.h"
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
/* internal defines */
struct DisasContext {
    DisasContextBase base;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool need_access_type;
    int access_type;
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;
#endif
    bool altivec_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};
/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}
static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;

    /*
     * These are all synchronous exceptions, so we set the PC back to the
     * faulting instruction.
     */
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->base.pc_next - 4);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;

    /*
     * These are all synchronous exceptions, so we set the PC back to the
     * faulting instruction.
     */
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->base.pc_next - 4);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
                              target_ulong nip)
{
    TCGv_i32 t0;

    gen_update_nip(ctx, nip);
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}
/*
 * Tells the caller which exception is appropriate to generate and prepares
 * the SPR registers for that exception.
 *
 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
 * POWERPC_EXCP_DEBUG (on BookE).
 */
static uint32_t gen_prep_dbgex(DisasContext *ctx)
{
    if (ctx->flags & POWERPC_FLAG_DE) {
        target_ulong dbsr = 0;
        if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
            dbsr = DBCR0_ICMP;
        } else {
            /* Must have been branch */
            dbsr = DBCR0_BRT;
        }
        TCGv t0 = tcg_temp_new();
        gen_load_spr(t0, SPR_BOOKE_DBSR);
        tcg_gen_ori_tl(t0, t0, dbsr);
        gen_store_spr(SPR_BOOKE_DBSR, t0);
        tcg_temp_free(t0);
        return POWERPC_EXCP_DEBUG;
    } else {
        return POWERPC_EXCP_TRACE;
    }
}
static void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    /*
     * These are all synchronous exceptions, so we set the PC back to the
     * faulting instruction.
     */
    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->base.pc_next);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->base.pc_next);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
/* Helpers for priv. check */
#define GEN_PRIV                                                \
    do {                                                        \
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
#define CHK_HV GEN_PRIV
#define CHK_SV GEN_PRIV
#define CHK_HVRM GEN_PRIV
#else
#define CHK_HV                                                  \
    do {                                                        \
        if (unlikely(ctx->pr || !ctx->hv)) {                    \
            GEN_PRIV;                                           \
        }                                                       \
    } while (0)
#define CHK_SV                                                  \
    do {                                                        \
        if (unlikely(ctx->pr)) {                                \
            GEN_PRIV;                                           \
        }                                                       \
    } while (0)
#define CHK_HVRM                                                \
    do {                                                        \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) {         \
            GEN_PRIV;                                           \
        }                                                       \
    } while (0)
#endif
/*****************************************************************************/
/* PowerPC instructions table                                                */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/*
 * The remaining initializer macros follow the same pattern:
 * GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)
 * additionally fills .inval2; GEN_OPCODE2 and GEN_OPCODE4 take an
 * explicit opcode name (onam) instead of stringify(name); GEN_OPCODE3
 * and GEN_OPCODE4 take a real fourth opcode instead of .opc4 = 0xff.
 * All of them set .handler = &gen_##name.
 */
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/*
 * GEN_OPCODE_DUAL/GEN_OPCODE2/GEN_OPCODE3/GEN_OPCODE4 vary in the same
 * way as above, minus the per-handler .oname that only exists in the
 * statistics build.
 */
#endif
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
/*** Integer comparison ***/

static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, CRF_EQ);
    tcg_gen_movi_tl(t1, CRF_LT);
    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(t1, CRF_GT);
    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
                       t0, arg0, arg1, t1, t0);

    tcg_gen_trunc_tl_i32(t, t0);
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free_i32(t);
}
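
/*
 * Note: each 4-bit CR field holds LT/GT/EQ/SO.  gen_op_cmp() starts the
 * field at CRF_EQ and lets the two movconds overwrite it with CRF_LT or
 * CRF_GT when the (signed or unsigned, per 's') relation holds, then ORs
 * in XER.SO.  Roughly:
 *
 *     crf = (a < b) ? CRF_LT : (a > b) ? CRF_GT : CRF_EQ;
 *     cr[field] = crf | so;
 */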
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
    tcg_temp_free_i32(src1);
    tcg_temp_free_i32(src2);
    tcg_temp_free_i32(src2lo);
    tcg_temp_free_i32(src2hi);
}
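
/*
 * cmprb compares the low byte of rA against one (L = 0) or two (L = 1)
 * inclusive byte ranges packed into rB (low/high bounds in successive
 * bytes) and reports "in range" in the GT bit of the target CR field,
 * which is what makes the isupper/isalpha/islower-style tests cheap.
 */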
#if defined(TARGET_PPC64)
/* cmpeqb */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}
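
/*
 * Illustrative semantics (names informal):
 *
 *     rD = CR_bit(bi) ? ((rA == 0) ? 0 : gpr[rA]) : gpr[rB];
 *
 * The CR bit is isolated by masking the containing 4-bit CR field, and
 * the rA == 0 case reuses the zero constant as the "true" operand.
 */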
/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
/*** Integer arithmetic ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
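
/*
 * The XOR rule used above: an addition overflows iff both operands have
 * the same sign and the result's sign differs (hence the andc with
 * arg1 ^ arg2); a subtraction (sub == 1, result = arg2 - arg1) overflows
 * iff the operands' signs differ and the result's sign differs from
 * arg2's.  Only the relevant sign bit survives the final extract into
 * OV/OV32.
 */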
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
    tcg_temp_free(t0);
}
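
/*
 * Why this works: for an addition, arg0 ^ arg1 ^ res has a 1 in every
 * bit position that received a carry-in, so bit 32 of it is exactly the
 * carry out of the low 32 bits.  A Power subtraction is computed as
 * ~a + b + 1, which inverts the first operand, hence the eqv (inverted
 * xor) in the sub case.  Pre-v3.0 ISAs have no CA32, hence the early
 * return.
 */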
/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);             /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
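
/*
 * The narrow-mode path relies on the same identity as the CA32 helper:
 * t1 = arg1 ^ arg2 is the carry-less sum, so sum ^ t1 has a 1 in every
 * bit that received a carry-in, and bit 32 of it is the carry out of the
 * low 32 bits, while t0 still holds the full 64-bit sum the spec
 * requires.
 */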
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}
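
/*
 * For reference, GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) below
 * expands (roughly) to:
 *
 *     static void gen_add(DisasContext *ctx)
 *     {
 *         gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],
 *                          cpu_gpr[rA(ctx->opcode)],
 *                          cpu_gpr[rB(ctx->opcode)],
 *                          cpu_ca, cpu_ca32, 0, 0, 0, Rc(ctx->opcode));
 *     }
 *
 * i.e. a plain add with no carry or overflow tracking, setting CR0 only
 * when the Rc bit is 1 ("add.").  addex passes cpu_ov instead, so that
 * glue(ca, 32) resolves to cpu_ov32.
 */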
/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme  addme.  addmeo  addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze  addze.  addzeo  addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}

/* addic  addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}

/* addpcis */
static void gen_addpcis(DisasContext *ctx)
{
    target_long d = DX(ctx->opcode);

    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16));
}
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu  divwu.  divwuo  divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i32 t2 = tcg_const_i32(1);
        TCGv_i32 t3 = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

#define GEN_INT_ARITH_MODW(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
#if defined(TARGET_PPC64)
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t2 = tcg_const_i64(1);
        TCGv_i64 t3 = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

#define GEN_INT_ARITH_MODD(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODD(modud, 0x08, 0);
GEN_INT_ARITH_MODD(modsd, 0x18, 1);
#endif
/* mulhw  mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhwu  mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
/* mullw  mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
#if defined(TARGET_PPC64)
/* mulhd  mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu  mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld  mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulldo  mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif
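
/*
 * mulldo's overflow test: muls2 produces the full 128-bit product in
 * (t0, t1); the 64-bit result overflows iff the high half t1 is not the
 * sign extension of the low half, i.e. t1 != (int64_t)t0 >> 63.
 */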
/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
/*** Integer logical ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_clzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
    tcg_temp_free_i32(t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzw */
static void gen_cnttzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_ctzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
    tcg_temp_free_i32(t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) */
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode)) {
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    } else {
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/* popcntw: PowerPC 2.06 specification */
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif
#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzd */
static void gen_cnttzd(DisasContext *ctx)
{
    tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* darn */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l > 2) {
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        if (l == 0) {
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_stop_exception(ctx);
        }
    }
}
#endif
/*** Integer rotate ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
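
/*
 * Fast-path rationale: when the rotated field lands exactly at its
 * insertion point (sh == 31 - me) and the mask is contiguous
 * (mb <= me), the whole rotate-and-insert collapses into a single
 * deposit of me - mb + 1 bits at offset sh.
 */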
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
                tcg_temp_free_i32(t0);
            }
        } else {
#if defined(TARGET_PPC64)
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
#if defined(TARGET_PPC64)
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
        tcg_temp_free_i64(t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);

/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);

/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);
    tcg_temp_free(t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);

/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
/*** Integer shift ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
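
/*
 * The mask trick above: the shift count is 6 bits wide, and counts
 * >= 0x20 must yield 0.  Shifting rB left so that its bit 5 lands in
 * the sign bit and arithmetic-shifting back produces an all-ones mask
 * exactly when bit 5 is set; andc with rS then clears the source before
 * the real 5-bit shift, all without a branch.
 */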
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
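
/*
 * CA after an arithmetic right shift is defined as "source negative and
 * at least one 1 bit shifted out".  The low sh bits of the sign-extended
 * value are the bits being shifted out; AND-ing them with the broadcast
 * sign bit and testing the result against zero yields exactly that
 * predicate.
 */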
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
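/*
 * sradi needs a 6-bit shift count, but the SH field holds only five
 * bits; the sixth bit sits elsewhere in the encoding and is passed in
 * as n, so sh = SH + (n << 5) spans the full 0..63 range.  The two
 * decoder entry points below supply n for each half of that range.
 */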
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
/* extswsli & extswsli. */
static inline void gen_extswsli(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];

    tcg_gen_ext32s_tl(dst, src);
    tcg_gen_shli_tl(dst, dst, sh);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}

static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
#endif
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
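/*
 * Two details worth noting above: maskl clears displacement bits that
 * DS/DQ-form encodings reuse as opcode bits (callers below pass 0x03
 * for ld/std and 0x0F for lq), and NARROW_MODE covers a 64-bit CPU
 * running in 32-bit mode, where effective addresses are truncated to
 * 32 bits with ext32u.
 */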
static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        }
    } else {
        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    }
}
static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        tcg_gen_movi_tl(EA, 0);
    } else if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    } else {
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
/*** Integer load ***/

#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
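/*
 * DEF_MEMOP() merges in the memop flags for the current guest
 * endianness (ctx->default_tcg_memop_mask is MO_BE or MO_LE depending
 * on ctx->le_mode), while BSWAP_MEMOP() xors in MO_BSWAP to get the
 * opposite endianness, which is what the byte-reverse accessors
 * (ld16ur, st32r, ...) below want.
 */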
#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
}

GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))

GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
#define GEN_QEMU_LOAD_64(ldop, op)                                  \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
                                              TCGv_i64 val,         \
                                              TCGv addr)            \
{                                                                   \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
}

GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_Q))

#if defined(TARGET_PPC64)
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
#endif
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
#define GEN_QEMU_STORE_64(stop, op)                               \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
                                              TCGv_i64 val,       \
                                              TCGv addr)          \
{                                                                 \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))

#if defined(TARGET_PPC64)
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
#endif
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk;                                                                      \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
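/*
 * GEN_LDS() fans one mnemonic out into its four addressing variants;
 * e.g. the GEN_LDS(lbz, ld8u, ...) below emits gen_lbz, gen_lbzu,
 * gen_lbzux and gen_lbzx, all loading through gen_qemu_ld8u.
 */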
/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV;                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
    tcg_temp_free(EA);                                                        \
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
#endif
#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);

/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
static void gen_ld(DisasContext *ctx)
{
    TCGv EA;

    if (Rc(ctx->opcode)) {
        if (unlikely(rA(ctx->opcode) == 0 ||
                     rA(ctx->opcode) == rD(ctx->opcode))) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x03);
    if (ctx->opcode & 0x02) {
        /* lwa (lwau is undefined) */
        gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    } else {
        gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    }
    if (Rc(ctx->opcode)) {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
    }
    tcg_temp_free(EA);
}
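/*
 * The two low opcode bits of the DS form select the variant handled
 * above: 0 is ld, 1 (the Rc bit) is ldu, and 2 is lwa.  This is also
 * why gen_addr_imm_index() is called with mask 0x03 here.
 */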
static void gen_lq(DisasContext *ctx)
{
    int ra, rd;
    TCGv EA, hi, lo;

    /* lq is a legal user mode instruction starting in ISA 2.07 */
    bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
    bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

    if (!legal_in_user_mode && ctx->pr) {
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }

    if (!le_is_supported && ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);

    /* Note that the low part is always in RD+1, even in LE mode. */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            TCGv_i32 oi = tcg_temp_new_i32();
            if (ctx->le_mode) {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
            } else {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
            }
            tcg_temp_free_i32(oi);
            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
        } else {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    } else if (ctx->le_mode) {
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
    } else {
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}
#endif
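/*
 * A note on the quadword atomicity scheme used by lq above (and by
 * stq in gen_std below): with parallel TCG threads (CF_PARALLEL) the
 * 16-byte access must be single-copy atomic, so it goes through a
 * helper when the host has 128-bit atomics (HAVE_ATOMIC128);
 * otherwise gen_helper_exit_atomic() aborts the TB and it is replayed
 * under the exclusive lock.  Single-threaded TCG can simply issue two
 * 8-byte accesses.
 */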
/*** Integer store ***/
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk;                                                                      \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV;                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
    tcg_temp_free(EA);                                                        \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04)
#endif
#if defined(TARGET_PPC64)
GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
static void gen_std(DisasContext *ctx)
{
    int rs;
    TCGv EA;

    rs = rS(ctx->opcode);
    if ((ctx->opcode & 0x3) == 0x2) { /* stq */
        bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
        bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
        TCGv hi, lo;

        if (!(ctx->insns_flags & PPC_64BX)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }

        if (!legal_in_user_mode && ctx->pr) {
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return;
        }

        if (!le_is_supported && ctx->le_mode) {
            gen_align_no_le(ctx);
            return;
        }

        if (unlikely(rs & 1)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);

        /* Note that the low part is always in RS+1, even in LE mode. */
        lo = cpu_gpr[rs + 1];
        hi = cpu_gpr[rs];

        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            if (HAVE_ATOMIC128) {
                TCGv_i32 oi = tcg_temp_new_i32();
                if (ctx->le_mode) {
                    tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
                    gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
                } else {
                    tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
                    gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
                }
                tcg_temp_free_i32(oi);
            } else {
                /* Restart with exclusive lock. */
                gen_helper_exit_atomic(cpu_env);
                ctx->base.is_jmp = DISAS_NORETURN;
            }
        } else if (ctx->le_mode) {
            tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
            gen_addr_add(ctx, EA, EA, 8);
            tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
        } else {
            tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
            gen_addr_add(ctx, EA, EA, 8);
            tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
        }
        tcg_temp_free(EA);
    } else {
        /* std / stdu */
        if (Rc(ctx->opcode)) {
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
        if (Rc(ctx->opcode)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
        }
        tcg_temp_free(EA);
    }
}
#endif
/*** Integer load and store with byte reverse ***/

/* lhbrx */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
/*** Integer load and store multiple ***/

/* lmw */
static void gen_lmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rD(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_lmw(cpu_env, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
/* stmw */
static void gen_stmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rS(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_stmw(cpu_env, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
/*** Integer load and store strings ***/

/* lswi */
/*
 * The PowerPC32 specification says we must generate an exception if rA
 * is in the range of registers to be loaded.  On the other hand, IBM
 * says this is valid, but rA won't be loaded.  For now, I'll follow
 * the spec...
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        nb = 32;
    }
    nr = DIV_ROUND_UP(nb, 4);
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(start);
    gen_helper_lsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
/* lswx */
static void gen_lswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2, t3;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_const_i32(rD(ctx->opcode));
    t2 = tcg_const_i32(rA(ctx->opcode));
    t3 = tcg_const_i32(rB(ctx->opcode));
    gen_helper_lswx(cpu_env, t0, t1, t2, t3);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
}
/* stswi */
static void gen_stswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    if (nb == 0) {
        nb = 32;
    }
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
/* stswx */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
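/*
 * Unlike stswi, stswx takes its byte count from XER[25:31] rather
 * than from the instruction, hence the truncate-and-mask of cpu_xer
 * with 0x7F above before calling the same gen_helper_stsw.
 */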
/*** Memory synchronisation ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_LD_ST;

    /*
     * POWER9 has an eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor".  So ignore bit 6 on non-POWER9 CPUs, but
         * complain to the user, since this is not an instruction that
         * software should be using.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->base.pc_next - 4);
        } else {
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
#if !defined(CONFIG_USER_ONLY)
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(cpu_env);
    } else {
        gen_helper_check_tlb_flush_local(cpu_env);
    }
    gen_set_label(l);
    tcg_temp_free_i32(t);
}
#else
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
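/*
 * TLB invalidations are handled lazily on this target: tlbie and
 * friends only set tlb_need_flush, and the actual flush is deferred
 * to the next context-synchronising point, which is why gen_isync
 * below calls gen_check_tlb_flush before emitting its barrier.
 */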
/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush.  This can only happen
     * in kernel mode, so check MSR_PR.
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_stop_exception(ctx);
}
#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))

static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    tcg_temp_free(t0);
}
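/*
 * Reservation model: cpu_reserve remembers the address guarded by the
 * larx and cpu_reserve_val the value that was loaded, so the matching
 * store-conditional can be implemented as a compare-and-swap against
 * that remembered value.  The trailing TCG_BAR_LDAQ barrier orders
 * the reserved load like a load-acquire.
 */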
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \