/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instructions classes and implementations flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers                                                  */

/* global register indexes */
static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 10*5 + 22*6 /* VSR */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_reserve;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
#include "exec/gen-icount.h"

void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");

    done_init = 1;
}
/* internal defines */
struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    int mem_idx;
    int access_type;
    /* Translation flags */
    int le_mode;
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    int sf_mode;
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int vsx_enabled;
    int spe_enabled;
    int tm_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long.  */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};
static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf(TCGv_i64 arg)
{
    gen_helper_compute_fprf(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}
static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}
static inline void gen_exception_err(DisasContext *ctx, uint32_t excp,
                                     uint32_t error)
{
    TCGv_i32 t0, t1;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
/*****************************************************************************/
/***                           Instruction decoding                        ***/
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |             \
            ((opcode >> (shift2)) & ((1 << (nb2)) - 1));                      \
}
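/* As an illustration, EXTRACT_HELPER(rD, 21, 5) below expands to
 *
 *     static inline uint32_t rD(uint32_t opcode)
 *     {
 *         return (opcode >> 21) & ((1 << 5) - 1);
 *     }
 *
 * i.e. it extracts the 5-bit rD field from bits 25..21 of the
 * instruction word.  EXTRACT_SHELPER additionally sign-extends the
 * field through the (int16_t) cast, and EXTRACT_HELPER_SPLIT
 * concatenates two disjoint fields, as the split VSX register numbers
 * further down require. */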
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/***                               Get CRn                                 ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
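/* Example: mfspr/mtspr encode the 10-bit SPR number with its two 5-bit
 * halves swapped.  The LR is SPR 8, so the instruction field holds
 * ((8 & 0x1F) << 5) | (8 >> 5) = 0x100, and SPR() above recovers 8. */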
/***                              Get constants                            ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
/***                            Jump target decoding                       ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
/* Create a mask between <start> and <end> bits */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
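/* Worked examples of MASK() on a 32-bit target, using the big-endian
 * bit numbering the ISA uses for rotate-and-mask instructions:
 *     MASK(0, 31)  = 0xFFFFFFFF    all bits
 *     MASK(0, 7)   = 0xFF000000    high byte
 *     MASK(24, 31) = 0x000000FF    low byte
 * When start > end the mask wraps around:
 *     MASK(28, 3)  = 0xF000000F
 */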
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
/*****************************************************************************/
/* PowerPC instructions table                                                */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif
/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
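/* These compile to a single load/store of env->spr[reg], so a typical
 * SPR read-modify-write looks like the PPR priority update inside
 * gen_or() further down:
 *
 *     TCGv t0 = tcg_temp_new();
 *     gen_load_spr(t0, SPR_PPR);
 *     ... adjust bits in t0 ...
 *     gen_store_spr(SPR_PPR, t0);
 *     tcg_temp_free(t0);
 */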
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
/***                           Integer comparison                          ***/

static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
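/* The CR field is assembled as the usual 4-bit LT/GT/EQ/SO vector,
 * using the CRF_* shift counts from cpu.h (CRF_LT = 3, CRF_GT = 2,
 * CRF_EQ = 1, CRF_SO = 0).  For instance, a signed compare of 5 with 7
 * sets only LT, so the field ends up as 0b1000 ORed with the current
 * SO bit in position 0. */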
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    TCGLabel *l1, *l2;
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask;
    TCGv_i32 t0;

    l1 = gen_new_label();
    l2 = gen_new_label();

    mask = 0x08 >> (bi & 0x03);
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
    if (rA(ctx->opcode) == 0)
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
    else
        tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    gen_set_label(l2);
    tcg_temp_free_i32(t0);
}
/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
/***                           Integer arithmetic                          ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
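/* This is the classic signed-overflow rule: an addition overflows iff
 * the operands have equal signs and the result's sign differs.  On 32
 * bits, 0x7FFFFFFF + 1 = 0x80000000: arg0 ^ arg2 has the sign bit set
 * (result differs from an operand) while arg1 ^ arg2 has it clear
 * (operands agree), so the andc keeps the sign bit and the final shift
 * extracts OV = 1. */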
/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32.  */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
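/* Worked example of the narrow-mode carry trick, with 32-bit operands
 * 0xFFFFFFFF and 0x00000001 held zero-extended in 64-bit registers:
 *     t1 = arg1 ^ arg2 = 0xFFFFFFFE      sum if no carry propagated
 *     t0 = arg1 + arg2 = 0x1_00000000    real sum
 *     t0 ^ t1          = 0x1_FFFFFFFE    bits flipped by carries
 * Bit 32 of that XOR is precisely the carry out of the low word, so
 * shifting right by 32 and masking with 1 gives CA = 1. */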
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}

/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme  addme.  addmeo  addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze  addze.  addzeo  addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}

/* addic  addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
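/* The two early branches to l1 catch the cases the ISA leaves
 * undefined: division by zero, and INT32_MIN / -1 for the signed form.
 * On that path the quotient written back is 0 for unsigned divisions
 * or the sign replication of the dividend for signed ones, and OV/SO
 * are raised when compute_ov is set. */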
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu  divwu.  divwuo  divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);\
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
/* mulhw  mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulhwu  mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mullw  mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
#if defined(TARGET_PPC64)
/* mulhd  mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu  mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld  mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulldo  mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif
/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32.  */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.  */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
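/* For the plain compute_ca path above, note that subtraction is
 * defined as ~arg1 + arg2 + 1, whose carry-out is 1 exactly when
 * arg2 >= arg1 as unsigned integers; hence the single setcond(GEU).
 * Example: arg1 = 3, arg2 = 5 gives t0 = 2 and CA = 1 (no borrow),
 * while arg1 = 5, arg2 = 3 gives t0 = -2 and CA = 0. */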
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
/***                            Integer logical                            ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (ctx->mem_idx > 0) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (ctx->mem_idx > 0) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (ctx->mem_idx > 0) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->mem_idx > 1) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        /* XXX: should handle special NOPs for POWER series */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif
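/* Both helpers use the standard XOR fold: each step XORs the value
 * with a shifted copy of itself, halving the distance between the bits
 * being combined.  For prtyw, bit 0 of each word ends up as
 * bit0 ^ bit8 ^ bit16 ^ bit24 of that word, i.e. the parity of the
 * low-order bits of its four bytes; e.g. rs = 0x01010100 has three of
 * those bits set, so the result word is 1. */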
#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif
#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
/***                             Integer rotate                            ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    sh = SH(ctx->opcode);
    if (likely(sh == (31-me) && mb <= me)) {
        tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                           cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv t1;
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotli_i64(t0, t0, sh);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        t1 = tcg_temp_new();
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    sh = SH(ctx->opcode);
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == (31 - sh))) {
        if (likely(sh == 0)) {
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)],
                              cpu_gpr[rS(ctx->opcode)]);
        } else {
            TCGv t0 = tcg_temp_new();
            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shli_tl(t0, t0, sh);
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
            tcg_temp_free(t0);
        }
    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_shri_tl(t0, t0, mb);
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free(t0);
    } else if (likely(mb == 0 && me == 31)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t0, t0, sh);
        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotli_i64(t0, t0, sh);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    uint32_t mb, me;
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == 31)) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_trunc_tl_i32(t1, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
        TCGv t0;
#if defined(TARGET_PPC64)
        TCGv t1;
#endif

        t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
        t1 = tcg_temp_new_i64();
        tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotl_i64(t0, t1, t0);
        tcg_temp_free_i64(t1);
#else
        tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
        if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
            mb += 32;
            me += 32;
#endif
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        } else {
            tcg_gen_andi_tl(t0, t0, MASK(32, 63));
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
                              uint32_t sh)
{
    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        if (likely(mb == 0 && me == 63)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        } else {
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
{
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    if (unlikely(mb != 0 || me != 63)) {
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb, me;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    me = 63 - sh;
    if (unlikely(sh == 0 && mb == 0)) {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        TCGv t0, t1;
        target_ulong mask;

        t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        t1 = tcg_temp_new();
        mask = MASK(mb, me);
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
/***                             Integer shift                             ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
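/* CA semantics illustrated: srawi must set CA iff the source is
 * negative *and* at least one 1 bit was shifted out, so that the
 * srawi/addze idiom rounds a signed division by a power of two toward
 * zero.  E.g. srawi by 1 on -5 yields dst = -3 with CA = 1, and addze
 * then produces -2 = trunc(-5 / 2). */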
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}

static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}

/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
#if defined(TARGET_PPC64)
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
    tcg_temp_free_i32(tmp);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif
/***                       Floating-Point arithmetic                       ***/
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);     \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                              \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);                     \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);

#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rB(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
/*** Floating-Point arithmetic ***/
/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);

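/*
 * frsqrtes cannot use GEN_FLOAT_BS: there is no dedicated single-precision
 * helper, so the double-precision gen_helper_frsqrte() result is rounded
 * explicitly with gen_helper_frsp() below.
 */
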
/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
                       cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fsel */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);

/* Optional: */
/* fsqrt */
static void gen_fsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

static void gen_fsqrts(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

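/*
 * The nm variants negate the final result: fnmadd computes
 * -((frA * frC) + frB) and fnmsub computes -((frA * frC) - frB); the helpers
 * implement the whole operation, including the final negation.
 */
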
/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);

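/*
 * Naming above follows the ISA mnemonics: the fctiw and fctid families
 * convert to signed or unsigned 32/64-bit integers (the z forms truncate
 * toward zero), the fcfid family converts from integers, frsp rounds to
 * single precision, and frin, friz, frip and frim round to an integral value
 * toward nearest, zero, +infinity and -infinity respectively.
 */
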
static void gen_ftdiv(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)]);
}

static void gen_ftsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
}

/*** Floating-Point compare ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
}

/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
}

/*** Floating-point move ***/
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                     ~(1ULL << 63));
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                    1ULL << 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
                     1ULL << 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

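/*
 * fcpsgn below is a pure bit operation: the 63-bit deposit keeps bit 63 (the
 * sign) from frA and takes the exponent and fraction bits from frB.
 */
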
/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                        cpu_fpr[rB(ctx->opcode)], 0, 63);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

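/*
 * fmrgew and fmrgow (ISA 2.07) merge 32-bit words of frA and frB: fmrgew
 * pairs the high (even) words, fmrgow the low (odd) words, again as plain
 * bit manipulation that does not touch the FPSCR.
 */
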
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
                        b0, 0, 32);
    tcg_temp_free_i64(b0);
}

static void gen_fmrgow(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
                        cpu_fpr[rB(ctx->opcode)],
                        cpu_fpr[rA(ctx->opcode)],
                        32, 32);
}

/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmask;
    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
    int bfa;
    int nibble;
    int shift;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = crfS(ctx->opcode);
    nibble = 7 - bfa;
    shift = 4 * nibble;
    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
                     0xf);
    tcg_temp_free(tmp);
    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
    /* Only the exception bits (including FX) should be cleared if read */
    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
                     ~((0xF << shift) & FP_EX_CLEAR_BITS));
    /* FEX and VX need to be updated, so don't set fpscr directly */
    tmask = tcg_const_i32(1 << nibble);
    gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
    tcg_temp_free_i32(tmask);
    tcg_temp_free_i64(tnew_fpscr);
}

/* mffs */
static void gen_mffs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}

/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}

/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    int flm, l, w;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    flm = FPFLM(ctx->opcode);
    l = FPL(ctx->opcode);
    w = FPW(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    if (l) {
        t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
    } else {
        t0 = tcg_const_i32(flm << (w * 8));
    }
    gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
    tcg_temp_free_i32(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}

/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh, w;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    w = FPW(ctx->opcode);
    bf = FPBF(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    sh = (8 * w) + 7 - bf;
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
    t1 = tcg_const_i32(1 << sh);
    gen_helper_store_fpscr(cpu_env, t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}

/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}

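/*
 * maskl clears the low opcode bits that DS/DQ-form instructions reuse as
 * sub-opcodes: gen_ld() and gen_std() below pass 0x03, gen_lq() passes 0x0F,
 * and plain D-form accesses pass 0.
 */
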
static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        }
    } else {
        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    }
}

static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        tcg_gen_movi_tl(EA, 0);
    } else if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    } else {
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}

static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}

static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
{
    TCGLabel *l1 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1, t2;
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    tcg_gen_andi_tl(t0, EA, mask);
    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
    t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
    t2 = tcg_const_i32(0);
    gen_helper_raise_exception_err(cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    gen_set_label(l1);
    tcg_temp_free(t0);
}

/*** Integer load ***/
static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_qemu_ld32u(ctx, tmp, addr);
    tcg_gen_extu_tl_i64(val, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_qemu_ld32s(ctx, tmp, addr);
    tcg_gen_ext_tl_i64(val, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_trunc_i64_tl(tmp, val);
    gen_qemu_st32(ctx, tmp, addr);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
}

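/*
 * ctx->default_tcg_memop_mask folds the guest's current endianness into each
 * memory operation, so the helpers above already perform any byte swap needed
 * in little-endian mode; only the 16-byte lq/stq sequences below still swap
 * the two halves by hand.
 */
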
#define GEN_LD(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv EA; \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    gen_addr_imm_index(ctx, EA, 0); \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_LDU(name, ldop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
    TCGv EA; \
    if (unlikely(rA(ctx->opcode) == 0 || \
                 rA(ctx->opcode) == rD(ctx->opcode))) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    if (type == PPC_64B) \
        gen_addr_imm_index(ctx, EA, 0x03); \
    else \
        gen_addr_imm_index(ctx, EA, 0); \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_LDUX(name, ldop, opc2, opc3, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
    TCGv EA; \
    if (unlikely(rA(ctx->opcode) == 0 || \
                 rA(ctx->opcode) == rD(ctx->opcode))) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    gen_addr_reg_index(ctx, EA); \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    gen_addr_reg_index(ctx, EA); \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_LDX(name, ldop, opc2, opc3, type) \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)

#define GEN_LDS(name, ldop, op, type) \
GEN_LD(name, ldop, op | 0x20, type); \
GEN_LDU(name, ldop, op | 0x21, type); \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)

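/*
 * GEN_LDS stamps out the four addressing variants of one load in a single
 * line: GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) defines gen_lbz, gen_lbzu,
 * gen_lbzux and gen_lbzx. The update forms write the computed EA back to rA,
 * which is why they reject rA == 0 and rA == rD.
 */
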
/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);

static void gen_ld(DisasContext *ctx)
{
    TCGv EA;

    if (Rc(ctx->opcode)) {
        if (unlikely(rA(ctx->opcode) == 0 ||
                     rA(ctx->opcode) == rD(ctx->opcode))) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x03);
    if (ctx->opcode & 0x02) {
        /* lwa (lwau is undefined) */
        gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    } else {
        gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    }
    if (Rc(ctx->opcode))
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
    tcg_temp_free(EA);
}

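/*
 * ld, ldu and lwa share the DS form, so gen_ld() decodes the low opcode bits
 * itself: bit 0x02 selects lwa, while Rc() here extracts the update bit (not
 * a record bit) and triggers the write-back of the EA to rA.
 */
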
/* lq */
static void gen_lq(DisasContext *ctx)
{
    int ra, rd;
    TCGv EA;

    /* lq is a legal user mode instruction starting in ISA 2.07 */
    bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
    bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

    if (!legal_in_user_mode && ctx->pr) {
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }

    if (!le_is_supported && ctx->le_mode) {
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
        return;
    }

    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);

    /* We only need to swap high and low halves. gen_qemu_ld64 does the
       necessary 64-bit byteswap already. */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64(ctx, cpu_gpr[rd + 1], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
    } else {
        gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_ld64(ctx, cpu_gpr[rd + 1], EA);
    }
    tcg_temp_free(EA);
}
#endif

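/*
 * lq fills an even/odd GPR pair: the target must be even and different from
 * rA, which the rd & 1 and rd == ra checks enforce. In little-endian mode
 * only the two halves are swapped, since gen_qemu_ld64() already byte swaps
 * each doubleword.
 */
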
/*** Integer store ***/
#define GEN_ST(name, stop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv EA; \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    gen_addr_imm_index(ctx, EA, 0); \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_STU(name, stop, opc, type) \
static void glue(gen_, stop##u)(DisasContext *ctx) \
{ \
    TCGv EA; \
    if (unlikely(rA(ctx->opcode) == 0)) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    if (type == PPC_64B) \
        gen_addr_imm_index(ctx, EA, 0x03); \
    else \
        gen_addr_imm_index(ctx, EA, 0); \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_STUX(name, stop, opc2, opc3, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
    TCGv EA; \
    if (unlikely(rA(ctx->opcode) == 0)) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    gen_addr_reg_index(ctx, EA); \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_STX_E(name, stop, opc2, opc3, type, type2) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(); \
    gen_addr_reg_index(ctx, EA); \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
}

#define GEN_STX(name, stop, opc2, opc3, type) \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)

#define GEN_STS(name, stop, op, type) \
GEN_ST(name, stop, op | 0x20, type); \
GEN_STU(name, stop, op | 0x21, type); \
GEN_STUX(name, stop, 0x17, op | 0x01, type); \
GEN_STX(name, stop, 0x17, op | 0x00, type)

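/*
 * GEN_STS mirrors GEN_LDS for stores: GEN_STS(stb, st8, 0x06, PPC_INTEGER)
 * covers stb, stbu, stbux and stbx. The update forms only need rA != 0 here,
 * since rS is a source operand and may legally equal rA.
 */
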
/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64, 0x15, 0x04, PPC_64B);

static void gen_std(DisasContext *ctx)
{
    int rs;
    TCGv EA;

    rs = rS(ctx->opcode);
    if ((ctx->opcode & 0x3) == 0x2) { /* stq */
        bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
        bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

        if (!legal_in_user_mode && ctx->pr) {
            gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return;
        }

        if (!le_is_supported && ctx->le_mode) {
            gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
            return;
        }

        if (unlikely(rs & 1)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);

        /* We only need to swap high and low halves. gen_qemu_st64 does the
           necessary 64-bit byteswap already. */
        if (unlikely(ctx->le_mode)) {
            gen_qemu_st64(ctx, cpu_gpr[rs + 1], EA);
            gen_addr_add(ctx, EA, EA, 8);
            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        } else {
            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
            gen_addr_add(ctx, EA, EA, 8);
            gen_qemu_st64(ctx, cpu_gpr[rs + 1], EA);
        }
        tcg_temp_free(EA);
    } else {
        /* std / stdu */
        if (Rc(ctx->opcode)) {
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);