/*
 *  PowerPC emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instruction classes and implementation flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers                                                  */

/* global register indexes */
static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 10*5 + 22*6 /* VSR */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
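/*
 * Sizing note (for reference): each register name is stored NUL-terminated
 * in cpu_reg_names.  With 32 registers, indices 0..9 take one digit and
 * 10..31 take two, so a name with P prefix characters costs
 * 10*(P+2) + 22*(P+3) bytes -- "r%d" gives the 10*3 + 22*4 term above,
 * "r%dH" and "fp%d" give 10*4 + 22*5, and "crf%d" accounts for the
 * 8*5 byte tail.
 */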
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_reserve;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "exec/gen-icount.h"
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;

        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }
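    /*
     * Layout note: each 128-bit Altivec register is stored as two host
     * uint64_t values, so which avr[i].u64[] element holds the
     * architecturally "high" half depends on HOST_WORDS_BIGENDIAN --
     * hence the mirrored offsetof() pairs for cpu_avrh/cpu_avrl above.
     */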
    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");

    done_init = 1;
}
/* internal defines */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    bool pr, hv;
    bool lazy_tlb_flush;
    int mem_idx;
    int access_type;
    /* Translation flags */
    int le_mode;
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    int sf_mode;
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int vsx_enabled;
    int spe_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;
    uint64_t insns_flags2;
} DisasContext;
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long.  */
#ifdef TARGET_PPC64
#  define NARROW_MODE(C)  (!(C)->sf_mode)
#else
#  define NARROW_MODE(C)  0
#endif
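/*
 * Example: a 64-bit CPU running with MSR.SF clear executes in 32-bit mode;
 * NARROW_MODE(ctx) is then true and results are truncated or sign-extended
 * to 32 bits before CR0, OV and CA are derived (see gen_set_Rc0 and
 * gen_op_arith_add below).
 */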
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};
static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf(TCGv_i64 arg)
{
    gen_helper_compute_fprf(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}
static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}
static inline void gen_exception_err(DisasContext *ctx, uint32_t excp,
                                     uint32_t error)
{
    TCGv_i32 t0, t1;

    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;

    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}
static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
/*****************************************************************************/
/***                           Instruction decoding                        ***/
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |             \
            ((opcode >> (shift2)) & ((1 << (nb2)) - 1));                      \
}
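/*
 * Worked example (illustrative): EXTRACT_HELPER(rD, 21, 5) below expands to
 * a function returning (opcode >> 21) & 0x1f, i.e. the 5-bit rD field.
 * For the encoding 0x7C221A14 ("add r1, r2, r3"), rD() yields 1, rA()
 * yields 2 and rB() yields 3.
 */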
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/***                               Get CRn                                 ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
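/*
 * Note: the 10-bit SPR number is encoded in mfspr/mtspr with its two 5-bit
 * halves swapped, and SPR() swaps them back.  E.g. for LR (SPR 8) the raw
 * instruction field is 8 << 5 == 0x100, and
 * ((0x100 >> 5) & 0x1F) | ((0x100 & 0x1F) << 5) == 8.
 */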
/***                              Get constants                            ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
/***                         Jump target decoding                          ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

/* Displacement */
static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
/* Create a mask between <start> and <end> bits */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
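/*
 * Worked example (64-bit target): MASK(0, 31) == 0xFFFFFFFF00000000ULL and
 * MASK(32, 63) == 0x00000000FFFFFFFFULL -- bit 0 is the most significant
 * bit in the PowerPC numbering.  When start > end the complement is
 * returned, producing the wrap-around masks that rlwinm and friends allow.
 */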
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
/*****************************************************************************/
/* PowerPC instructions table                                                */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{ .opc1 = op1, .opc2 = op2, .opc3 = op3, .pad = { 0, },                       \
  .handler = { .inval1 = invl, .type = _typ, .type2 = _typ2,                  \
               .handler = &gen_##name, .oname = stringify(name), },           \
  .oname = stringify(name), }
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{ .opc1 = op1, .opc2 = op2, .opc3 = op3, .pad = { 0, },                       \
  .handler = { .inval1 = invl1, .inval2 = invl2, .type = _typ,                \
               .type2 = _typ2, .handler = &gen_##name,                        \
               .oname = stringify(name), },                                   \
  .oname = stringify(name), }
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{ .opc1 = op1, .opc2 = op2, .opc3 = op3, .pad = { 0, },                       \
  .handler = { .inval1 = invl, .type = _typ, .type2 = _typ2,                  \
               .handler = &gen_##name, .oname = onam, },                      \
  .oname = onam, }
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{ .opc1 = op1, .opc2 = op2, .opc3 = op3, .pad = { 0, },                       \
  .handler = { .inval1 = invl, .type = _typ, .type2 = _typ2,                  \
               .handler = &gen_##name, },                                     \
  .oname = stringify(name), }
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{ .opc1 = op1, .opc2 = op2, .opc3 = op3, .pad = { 0, },                       \
  .handler = { .inval1 = invl1, .inval2 = invl2, .type = _typ,                \
               .type2 = _typ2, .handler = &gen_##name, },                     \
  .oname = stringify(name), }
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{ .opc1 = op1, .opc2 = op2, .opc3 = op3, .pad = { 0, },                       \
  .handler = { .inval1 = invl, .type = _typ, .type2 = _typ2,                  \
               .handler = &gen_##name, },                                     \
  .oname = onam, }
#endif
/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
/***                           Integer comparison                          ***/

static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}

/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
/***                           Integer arithmetic                          ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
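/*
 * Rationale (for reference): with result arg0 and operands arg1/arg2,
 * signed overflow on an addition means both operands share a sign the
 * result lacks, i.e. the sign bit of (arg0 ^ arg2) & ~(arg1 ^ arg2);
 * for subtraction the arg1 ^ arg2 term flips, which is why `sub` selects
 * and_tl versus andc_tl above.
 */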
/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32.  */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
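/*
 * Carry-extraction sketch: for any x and y, (x + y) ^ x ^ y has bit n set
 * exactly when a carry propagated *into* bit n.  4-bit example: x = 0x8,
 * y = 0x9 gives x + y = 0x11 and x ^ y = 0x01, so the XOR is 0x10 -- a
 * carry into bit 4.  Shifting the 64-bit form right by 32 therefore
 * recovers the 32-bit carry-out used in narrow mode above.
 */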
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}

/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme  addme.  addmeo  addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze  addze.  addzeo  addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}

/* addic  addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
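/*
 * The branches to l1 cover the two boundedly-undefined cases: division by
 * zero and, for signed divides, INT32_MIN / -1 (whose quotient 2^31 is not
 * representable).  This implementation returns 0, or the sign-extension of
 * the dividend for signed divides, and raises OV/SO when compute_ov is set.
 */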
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu  divwu.  divwuo  divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
/* mulhw  mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulhwu  mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mullw  mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
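/*
 * Overflow test above: the 64-bit signed product fits in 32 bits iff the
 * high word (t1) equals the sign-extension of the low word (t0 shifted
 * right arithmetically by 31), so OV is set exactly when those two values
 * differ.
 */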
/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
#if defined(TARGET_PPC64)
/* mulhd  mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu  mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld  mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulldo  mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif
/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32.  */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.  */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
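/*
 * Identity used throughout: in two's complement arg2 - arg1 ==
 * ~arg1 + arg2 + 1, and the PowerPC CA bit is the carry-out of that
 * addition.  8-bit example: 5 - 3 computes 0xFC + 5 + 1 = 0x102, whose
 * carry-out sets CA = 1 (no borrow); 3 - 5 computes 0xFA + 3 + 1 = 0xFE
 * with no carry-out, so CA = 0.
 */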
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}

/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
/***                            Integer logical                            ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
#if defined(TARGET_PPC64)
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation so that other CPUs get a chance to run */
    gen_exception_err(ctx, EXCP_HLT, 1);
}
#endif /* defined(TARGET_PPC64) */
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /* Pause us out of TCG, otherwise spin loops with smt_low
         * eat too much CPU and the kernel hangs.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}

/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}
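/*
 * Fold sketch: prtyw's input is a word of byte-parity bits (bit 0 of each
 * byte, as produced by popcntb).  XOR-ing the value with itself shifted
 * right by 16 and then by 8 collects the four byte bits of each 32-bit
 * half into that half's bit 0, and the 0x100000001 mask keeps exactly
 * those two per-word parity bits.
 */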
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
/***                             Integer rotate                            ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv_i32 t0;
        TCGv t1;

#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new();
        tcg_gen_trunc_tl_i32(t0, t_rs);
        tcg_gen_rotli_i32(t0, t0, sh);
        tcg_gen_extu_i32_tl(t1, t0);
        tcg_temp_free_i32(t0);

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (mb == 0 && me == (31 - sh)) {
        tcg_gen_shli_tl(t_ra, t_rs, sh);
        tcg_gen_ext32u_tl(t_ra, t_ra);
    } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
        tcg_gen_ext32u_tl(t_ra, t_rs);
        tcg_gen_shri_tl(t_ra, t_ra, mb);
    } else {
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        if (sh == 0) {
            tcg_gen_andi_tl(t_ra, t_rs, MASK(mb, me));
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_andi_i32(t0, t0, MASK(mb, me));
            tcg_gen_extu_i32_tl(t_ra, t0);
            tcg_temp_free_i32(t0);
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
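/*
 * Fast-path examples: "slwi rA,rS,8" assembles to rlwinm rA,rS,8,0,23
 * (mb == 0, me == 31 - sh) and "srwi rA,rS,8" to rlwinm rA,rS,24,8,31
 * (me == 31, sh == 32 - mb), so both common idioms above avoid the
 * generic rotate-and-mask sequence.
 */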
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    TCGv_i32 t0, t1;

#if defined(TARGET_PPC64)
    mb += 32;
    me += 32;
#endif

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, t_rb);
    tcg_gen_trunc_tl_i32(t1, t_rs);
    tcg_gen_andi_i32(t0, t0, 0x1f);
    tcg_gen_rotl_i32(t1, t1, t0);
    tcg_temp_free_i32(t0);

    tcg_gen_andi_i32(t1, t1, MASK(mb, me));
    tcg_gen_extu_i32_tl(t_ra, t1);
    tcg_temp_free_i32(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];

    if (sh != 0 && mb == 0 && me == (63 - sh)) {
        tcg_gen_shli_tl(t_ra, t_rs, sh);
    } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
        tcg_gen_shri_tl(t_ra, t_rs, mb);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);

/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);

/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);

static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);
    tcg_temp_free(t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);

/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);

/* rldimi - rldimi. */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
/***                             Integer shift                             ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
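/*
 * Mask trick above: shifting rB left by 0x3a (58) moves its bit 5 into
 * the sign bit, and the arithmetic right shift by 0x3f replicates it
 * across the word.  The mask is therefore all-ones exactly when the shift
 * amount is >= 0x20, and the andc zeroes rS so that oversized shifts
 * yield 0, as the architecture requires.
 */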
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];

    if (sh == 0) {
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
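/*
 * CA semantics: srawi/sradi set the carry only when the source is negative
 * and non-zero bits are shifted out (the truncated result then differs
 * from an exact division by 2^sh).  Hence the AND of the shifted-out bits
 * with the sign mask, reduced to 0/1 with setcond != 0.
 */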
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];

    if (sh == 0) {
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}

static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}

/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
    tcg_temp_free_i32(tmp);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif
/***                       Floating-Point arithmetic                       ***/
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);     \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                              \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);                     \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);

#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rB(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);

/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
                       cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fsel */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional: */
/* fsqrt */
static void gen_fsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
static void gen_fsqrts(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
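
/*
 * Naming note: the fcti* handlers above convert floating-point values to
 * word or doubleword integers (the "z" forms round toward zero), while
 * the fcfid* handlers convert doubleword integers to floating-point.
 * Each generated gen_f##name simply forwards to the matching
 * gen_helper_f##name softfloat helper.
 */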
static void gen_ftdiv(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)]);
}
static void gen_ftsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
}
/*** Floating-Point compare ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
}
/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
}
/*** Floating-point move ***/

/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                     ~(1ULL << 63));
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                    1ULL << 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                     1ULL << 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
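
/*
 * fabs/fneg/fnabs only manipulate the IEEE754 sign bit (bit 63 of the
 * double). For example, -2.0 is 0xC000000000000000; clearing bit 63
 * (fabs) gives 0x4000000000000000, i.e. 2.0. That is why none of them
 * check for NaNs or touch the FPSCR.
 */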
/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                        cpu_fpr[rB(ctx->opcode)], 0, 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                        b0, 0, 32);
    tcg_temp_free_i64(b0);
}
static void gen_fmrgow(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
                        cpu_fpr[rB(ctx->opcode)],
                        cpu_fpr[rA(ctx->opcode)],
                        32, 32);
}
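
/*
 * The two merge-word deposits above implement:
 *   fmrgew: frD = { high word of frA, high word of frB }
 *   fmrgow: frD = { low word of frA,  low word of frB }
 * i.e. the even (high) or odd (low) 32-bit words of the two source
 * registers are packed into one 64-bit result.
 */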
/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmask;
    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
    int bfa;
    int nibble;
    int shift;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = crfS(ctx->opcode);
    nibble = 7 - bfa;
    shift = 4 * nibble;
    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
    tcg_temp_free(tmp);
    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
    /* Only the exception bits (including FX) should be cleared if read */
    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
    /* FEX and VX need to be updated, so don't set fpscr directly */
    tmask = tcg_const_i32(1 << nibble);
    gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
    tcg_temp_free_i32(tmask);
    tcg_temp_free_i64(tnew_fpscr);
}
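
/*
 * Example: for "mcrfs cr2, 0", bfa is 0, so nibble is 7 and shift is 28;
 * FPSCR field 0 (FX, FEX, VX, OX) lands in cr2, and the store_fpscr
 * helper then clears whichever of those bits FP_EX_CLEAR_BITS marks as
 * clear-on-read.
 */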
/* mffs */
static void gen_mffs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}
/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}
/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}
/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    int flm, l, w;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    flm = FPFLM(ctx->opcode);
    l = FPL(ctx->opcode);
    w = FPW(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    if (l) {
        t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
    } else {
        t0 = tcg_const_i32(flm << (w * 8));
    }
    gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
    tcg_temp_free_i32(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}
/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh, w;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    w = FPW(ctx->opcode);
    bf = FPBF(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    sh = (8 * w) + 7 - bf;
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
    t1 = tcg_const_i32(1 << sh);
    gen_helper_store_fpscr(cpu_env, t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}
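
/*
 * The field arithmetic above maps the ISA's big-endian FPSCR field
 * number bf onto a nibble shift: with w = 0 and bf = 7, sh is 0, so the
 * immediate lands in FPSCR bits 3:0 and the helper mask is 1 << 0.
 * w = 1 (ISA 2.05) addresses the upper word's fields the same way.
 */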
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
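
/*
 * Example: for "lwz r3, 8(r4)" this computes EA = r4 + 8, while with
 * rA = 0 the base operand reads as zero and EA is just the constant 8.
 * The maskl argument strips low displacement bits that DS/DQ-form
 * instructions such as ld (0x03) and lq (0x0F) reuse as opcode bits.
 */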
static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        }
    } else {
        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    }
}
static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        tcg_gen_movi_tl(EA, 0);
    } else if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    } else {
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
{
    TCGLabel *l1 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1, t2;
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    tcg_gen_andi_tl(t0, EA, mask);
    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
    t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
    t2 = tcg_const_i32(0);
    gen_helper_raise_exception_err(cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    gen_set_label(l1);
    tcg_temp_free(t0);
}
/*** Integer load ***/
static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
}
static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_qemu_ld32u(ctx, tmp, addr);
    tcg_gen_extu_tl_i64(val, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_qemu_ld32s(ctx, tmp, addr);
    tcg_gen_ext_tl_i64(val, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_trunc_i64_tl(tmp, val);
    gen_qemu_st32(ctx, tmp, addr);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
}
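
/*
 * All of the access helpers above fold ctx->default_tcg_memop_mask into
 * the TCGMemOp, so a single gen_qemu_ld32u emits a big-endian or a
 * little-endian load to match the current guest mode, without any
 * per-call-site byteswap logic.
 */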
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)

#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
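
/*
 * Illustrative expansion: GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) below
 * defines the four handlers gen_lbz, gen_lbzu, gen_lbzux and gen_lbzx,
 * all built on gen_qemu_ld8u but differing in addressing mode and
 * base-register update.
 */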
/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
static void gen_ld(DisasContext *ctx)
{
    TCGv EA;

    if (Rc(ctx->opcode)) {
        if (unlikely(rA(ctx->opcode) == 0 ||
                     rA(ctx->opcode) == rD(ctx->opcode))) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x03);
    if (ctx->opcode & 0x02) {
        /* lwa (lwau is undefined) */
        gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    } else {
        gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    }
    if (Rc(ctx->opcode))
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
    tcg_temp_free(EA);
}
/* lq */
static void gen_lq(DisasContext *ctx)
{
    int ra, rd;
    TCGv EA;

    /* lq is a legal user mode instruction starting in ISA 2.07 */
    bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
    bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

    if (!legal_in_user_mode && ctx->pr) {
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }

    if (!le_is_supported && ctx->le_mode) {
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
        return;
    }

    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);

    /* We only need to swap high and low halves. gen_qemu_ld64 does the
       necessary 64-bit byteswap already. */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
    } else {
        gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
    }
    tcg_temp_free(EA);
}
#endif
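
/*
 * Illustration of the byte-order handling above: in LE mode rd is loaded
 * from EA+8 and rd+1 from EA, the reverse of the big-endian pairing,
 * while the byteswap inside each doubleword is already handled by
 * gen_qemu_ld64 through default_tcg_memop_mask.
 */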
/*** Integer store ***/
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX_E(name, stop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)

#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
static void gen_std(DisasContext *ctx)
{
    int rs;
    TCGv EA;

    rs = rS(ctx->opcode);
    if ((ctx->opcode & 0x3) == 0x2) { /* stq */
        bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
        bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

        if (!legal_in_user_mode && ctx->pr) {
            gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return;
        }

        if (!le_is_supported && ctx->le_mode) {
            gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
            return;
        }

        if (unlikely(rs & 1)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);

        /* We only need to swap high and low halves. gen_qemu_st64 does the
           necessary 64-bit byteswap already. */
        if (unlikely(ctx->le_mode)) {
            gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
            gen_addr_add(ctx, EA, EA, 8);
            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        } else {
            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
            gen_addr_add(ctx, EA, EA, 8);
            gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
        }
        tcg_temp_free(EA);
    } else {
        /* std / stdu */
        if (Rc(ctx->opcode)) {
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        if (Rc(ctx->opcode))
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
        tcg_temp_free(EA);
    }
}
#endif
/*** Integer load and store with byte reverse ***/

/* lhbrx */
static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
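
/*
 * XORing MO_BSWAP into default_tcg_memop_mask flips the access relative
 * to the current guest byte order: on a big-endian guest lhbrx/lwbrx
 * become little-endian loads, and on a little-endian guest they become
 * big-endian loads. The stores below use the same trick.
 */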
#if defined(TARGET_PPC64)
/* ldbrx */
static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
#endif  /* TARGET_PPC64 */

/* sthbrx */
static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);

/* stwbrx */
static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)