/*
 *  PowerPC emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "qemu/host-utils.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
36 #define CPU_SINGLE_STEP 0x1
37 #define CPU_BRANCH_STEP 0x2
38 #define GDBSTUB_SINGLE_STEP 0x4
40 /* Include definitions for instructions classes and implementations flags */
41 //#define PPC_DEBUG_DISAS
42 //#define DO_PPC_STATISTICS
44 #ifdef PPC_DEBUG_DISAS
45 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
47 # define LOG_DISAS(...) do { } while (0)
49 /*****************************************************************************/
50 /* Code translation helpers */
52 /* global register indexes */
53 static TCGv_env cpu_env
;
54 static char cpu_reg_names
[10*3 + 22*4 /* GPR */
55 + 10*4 + 22*5 /* SPE GPRh */
56 + 10*4 + 22*5 /* FPR */
57 + 2*(10*6 + 22*7) /* AVRh, AVRl */
58 + 10*5 + 22*6 /* VSR */
60 static TCGv cpu_gpr
[32];
61 static TCGv cpu_gprh
[32];
62 static TCGv_i64 cpu_fpr
[32];
63 static TCGv_i64 cpu_avrh
[32], cpu_avrl
[32];
64 static TCGv_i64 cpu_vsr
[32];
65 static TCGv_i32 cpu_crf
[8];
70 #if defined(TARGET_PPC64)
73 static TCGv cpu_xer
, cpu_so
, cpu_ov
, cpu_ca
;
74 static TCGv cpu_reserve
;
75 static TCGv cpu_fpscr
;
76 static TCGv_i32 cpu_access_type
;
78 #include "exec/gen-icount.h"
80 void ppc_translate_init(void)
84 size_t cpu_reg_names_size
;
85 static int done_init
= 0;
90 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
93 cpu_reg_names_size
= sizeof(cpu_reg_names
);
95 for (i
= 0; i
< 8; i
++) {
96 snprintf(p
, cpu_reg_names_size
, "crf%d", i
);
97 cpu_crf
[i
] = tcg_global_mem_new_i32(cpu_env
,
98 offsetof(CPUPPCState
, crf
[i
]), p
);
100 cpu_reg_names_size
-= 5;
103 for (i
= 0; i
< 32; i
++) {
104 snprintf(p
, cpu_reg_names_size
, "r%d", i
);
105 cpu_gpr
[i
] = tcg_global_mem_new(cpu_env
,
106 offsetof(CPUPPCState
, gpr
[i
]), p
);
107 p
+= (i
< 10) ?
3 : 4;
108 cpu_reg_names_size
-= (i
< 10) ?
3 : 4;
109 snprintf(p
, cpu_reg_names_size
, "r%dH", i
);
110 cpu_gprh
[i
] = tcg_global_mem_new(cpu_env
,
111 offsetof(CPUPPCState
, gprh
[i
]), p
);
112 p
+= (i
< 10) ?
4 : 5;
113 cpu_reg_names_size
-= (i
< 10) ?
4 : 5;
115 snprintf(p
, cpu_reg_names_size
, "fp%d", i
);
116 cpu_fpr
[i
] = tcg_global_mem_new_i64(cpu_env
,
117 offsetof(CPUPPCState
, fpr
[i
]), p
);
118 p
+= (i
< 10) ?
4 : 5;
119 cpu_reg_names_size
-= (i
< 10) ?
4 : 5;
121 snprintf(p
, cpu_reg_names_size
, "avr%dH", i
);
122 #ifdef HOST_WORDS_BIGENDIAN
123 cpu_avrh
[i
] = tcg_global_mem_new_i64(cpu_env
,
124 offsetof(CPUPPCState
, avr
[i
].u64
[0]), p
);
126 cpu_avrh
[i
] = tcg_global_mem_new_i64(cpu_env
,
127 offsetof(CPUPPCState
, avr
[i
].u64
[1]), p
);
129 p
+= (i
< 10) ?
6 : 7;
130 cpu_reg_names_size
-= (i
< 10) ?
6 : 7;
132 snprintf(p
, cpu_reg_names_size
, "avr%dL", i
);
133 #ifdef HOST_WORDS_BIGENDIAN
134 cpu_avrl
[i
] = tcg_global_mem_new_i64(cpu_env
,
135 offsetof(CPUPPCState
, avr
[i
].u64
[1]), p
);
137 cpu_avrl
[i
] = tcg_global_mem_new_i64(cpu_env
,
138 offsetof(CPUPPCState
, avr
[i
].u64
[0]), p
);
140 p
+= (i
< 10) ?
6 : 7;
141 cpu_reg_names_size
-= (i
< 10) ?
6 : 7;
142 snprintf(p
, cpu_reg_names_size
, "vsr%d", i
);
143 cpu_vsr
[i
] = tcg_global_mem_new_i64(cpu_env
,
144 offsetof(CPUPPCState
, vsr
[i
]), p
);
145 p
+= (i
< 10) ?
5 : 6;
146 cpu_reg_names_size
-= (i
< 10) ?
5 : 6;
149 cpu_nip
= tcg_global_mem_new(cpu_env
,
150 offsetof(CPUPPCState
, nip
), "nip");
152 cpu_msr
= tcg_global_mem_new(cpu_env
,
153 offsetof(CPUPPCState
, msr
), "msr");
155 cpu_ctr
= tcg_global_mem_new(cpu_env
,
156 offsetof(CPUPPCState
, ctr
), "ctr");
158 cpu_lr
= tcg_global_mem_new(cpu_env
,
159 offsetof(CPUPPCState
, lr
), "lr");
161 #if defined(TARGET_PPC64)
162 cpu_cfar
= tcg_global_mem_new(cpu_env
,
163 offsetof(CPUPPCState
, cfar
), "cfar");
166 cpu_xer
= tcg_global_mem_new(cpu_env
,
167 offsetof(CPUPPCState
, xer
), "xer");
168 cpu_so
= tcg_global_mem_new(cpu_env
,
169 offsetof(CPUPPCState
, so
), "SO");
170 cpu_ov
= tcg_global_mem_new(cpu_env
,
171 offsetof(CPUPPCState
, ov
), "OV");
172 cpu_ca
= tcg_global_mem_new(cpu_env
,
173 offsetof(CPUPPCState
, ca
), "CA");
175 cpu_reserve
= tcg_global_mem_new(cpu_env
,
176 offsetof(CPUPPCState
, reserve_addr
),
179 cpu_fpscr
= tcg_global_mem_new(cpu_env
,
180 offsetof(CPUPPCState
, fpscr
), "fpscr");
182 cpu_access_type
= tcg_global_mem_new_i32(cpu_env
,
183 offsetof(CPUPPCState
, access_type
), "access_type");
188 /* internal defines */
189 struct DisasContext
{
190 struct TranslationBlock
*tb
;
194 /* Routine used to access memory */
199 /* Translation flags */
201 TCGMemOp default_tcg_memop_mask
;
202 #if defined(TARGET_PPC64)
211 ppc_spr_t
*spr_cb
; /* Needed to check rights for mfspr/mtspr */
212 int singlestep_enabled
;
213 uint64_t insns_flags
;
214 uint64_t insns_flags2
;
217 /* Return true iff byteswap is needed in a scalar memop */
218 static inline bool need_byteswap(const DisasContext
*ctx
)
220 #if defined(TARGET_WORDS_BIGENDIAN)
223 return !ctx
->le_mode
;
227 /* True when active word size < size of target_long. */
229 # define NARROW_MODE(C) (!(C)->sf_mode)
231 # define NARROW_MODE(C) 0
234 struct opc_handler_t
{
235 /* invalid bits for instruction 1 (Rc(opcode) == 0) */
237 /* invalid bits for instruction 2 (Rc(opcode) == 1) */
239 /* instruction type */
241 /* extended instruction type */
244 void (*handler
)(DisasContext
*ctx
);
245 #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
248 #if defined(DO_PPC_STATISTICS)
253 static inline void gen_reset_fpstatus(void)
255 gen_helper_reset_fpstatus(cpu_env
);
258 static inline void gen_compute_fprf(TCGv_i64 arg
)
260 gen_helper_compute_fprf(cpu_env
, arg
);
261 gen_helper_float_check_status(cpu_env
);
264 static inline void gen_set_access_type(DisasContext
*ctx
, int access_type
)
266 if (ctx
->access_type
!= access_type
) {
267 tcg_gen_movi_i32(cpu_access_type
, access_type
);
268 ctx
->access_type
= access_type
;
272 static inline void gen_update_nip(DisasContext
*ctx
, target_ulong nip
)
274 if (NARROW_MODE(ctx
)) {
277 tcg_gen_movi_tl(cpu_nip
, nip
);
280 void gen_update_current_nip(void *opaque
)
282 DisasContext
*ctx
= opaque
;
284 tcg_gen_movi_tl(cpu_nip
, ctx
->nip
);
287 static inline void gen_exception_err(DisasContext
*ctx
, uint32_t excp
, uint32_t error
)
290 if (ctx
->exception
== POWERPC_EXCP_NONE
) {
291 gen_update_nip(ctx
, ctx
->nip
);
293 t0
= tcg_const_i32(excp
);
294 t1
= tcg_const_i32(error
);
295 gen_helper_raise_exception_err(cpu_env
, t0
, t1
);
296 tcg_temp_free_i32(t0
);
297 tcg_temp_free_i32(t1
);
298 ctx
->exception
= (excp
);
301 static inline void gen_exception(DisasContext
*ctx
, uint32_t excp
)
304 if (ctx
->exception
== POWERPC_EXCP_NONE
) {
305 gen_update_nip(ctx
, ctx
->nip
);
307 t0
= tcg_const_i32(excp
);
308 gen_helper_raise_exception(cpu_env
, t0
);
309 tcg_temp_free_i32(t0
);
310 ctx
->exception
= (excp
);
313 static inline void gen_debug_exception(DisasContext
*ctx
)
317 if ((ctx
->exception
!= POWERPC_EXCP_BRANCH
) &&
318 (ctx
->exception
!= POWERPC_EXCP_SYNC
)) {
319 gen_update_nip(ctx
, ctx
->nip
);
321 t0
= tcg_const_i32(EXCP_DEBUG
);
322 gen_helper_raise_exception(cpu_env
, t0
);
323 tcg_temp_free_i32(t0
);
326 static inline void gen_inval_exception(DisasContext
*ctx
, uint32_t error
)
328 gen_exception_err(ctx
, POWERPC_EXCP_PROGRAM
, POWERPC_EXCP_INVAL
| error
);
331 /* Stop translation */
332 static inline void gen_stop_exception(DisasContext
*ctx
)
334 gen_update_nip(ctx
, ctx
->nip
);
335 ctx
->exception
= POWERPC_EXCP_STOP
;
338 #ifndef CONFIG_USER_ONLY
339 /* No need to update nip here, as execution flow will change */
340 static inline void gen_sync_exception(DisasContext
*ctx
)
342 ctx
->exception
= POWERPC_EXCP_SYNC
;
346 #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
347 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)
349 #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
350 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)
352 #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
353 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
355 #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
356 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
358 typedef struct opcode_t
{
359 unsigned char opc1
, opc2
, opc3
;
360 #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
361 unsigned char pad
[5];
363 unsigned char pad
[1];
365 opc_handler_t handler
;
369 /*****************************************************************************/
370 /*** Instruction decoding ***/
371 #define EXTRACT_HELPER(name, shift, nb) \
372 static inline uint32_t name(uint32_t opcode) \
374 return (opcode >> (shift)) & ((1 << (nb)) - 1); \
377 #define EXTRACT_SHELPER(name, shift, nb) \
378 static inline int32_t name(uint32_t opcode) \
380 return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
383 #define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
384 static inline uint32_t name(uint32_t opcode) \
386 return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
387 ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
390 EXTRACT_HELPER(opc1
, 26, 6);
392 EXTRACT_HELPER(opc2
, 1, 5);
394 EXTRACT_HELPER(opc3
, 6, 5);
395 /* Update Cr0 flags */
396 EXTRACT_HELPER(Rc
, 0, 1);
397 /* Update Cr6 flags (Altivec) */
398 EXTRACT_HELPER(Rc21
, 10, 1);
400 EXTRACT_HELPER(rD
, 21, 5);
402 EXTRACT_HELPER(rS
, 21, 5);
404 EXTRACT_HELPER(rA
, 16, 5);
406 EXTRACT_HELPER(rB
, 11, 5);
408 EXTRACT_HELPER(rC
, 6, 5);
410 EXTRACT_HELPER(crfD
, 23, 3);
411 EXTRACT_HELPER(crfS
, 18, 3);
412 EXTRACT_HELPER(crbD
, 21, 5);
413 EXTRACT_HELPER(crbA
, 16, 5);
414 EXTRACT_HELPER(crbB
, 11, 5);
416 EXTRACT_HELPER(_SPR
, 11, 10);
417 static inline uint32_t SPR(uint32_t opcode
)
419 uint32_t sprn
= _SPR(opcode
);
421 return ((sprn
>> 5) & 0x1F) | ((sprn
& 0x1F) << 5);
423 /*** Get constants ***/
424 /* 16 bits signed immediate value */
425 EXTRACT_SHELPER(SIMM
, 0, 16);
426 /* 16 bits unsigned immediate value */
427 EXTRACT_HELPER(UIMM
, 0, 16);
428 /* 5 bits signed immediate value */
429 EXTRACT_HELPER(SIMM5
, 16, 5);
430 /* 5 bits signed immediate value */
431 EXTRACT_HELPER(UIMM5
, 16, 5);
433 EXTRACT_HELPER(NB
, 11, 5);
435 EXTRACT_HELPER(SH
, 11, 5);
436 /* Vector shift count */
437 EXTRACT_HELPER(VSH
, 6, 4);
439 EXTRACT_HELPER(MB
, 6, 5);
441 EXTRACT_HELPER(ME
, 1, 5);
443 EXTRACT_HELPER(TO
, 21, 5);
445 EXTRACT_HELPER(CRM
, 12, 8);
447 #ifndef CONFIG_USER_ONLY
448 EXTRACT_HELPER(SR
, 16, 4);
452 EXTRACT_HELPER(FPBF
, 23, 3);
453 EXTRACT_HELPER(FPIMM
, 12, 4);
454 EXTRACT_HELPER(FPL
, 25, 1);
455 EXTRACT_HELPER(FPFLM
, 17, 8);
456 EXTRACT_HELPER(FPW
, 16, 1);
458 /*** Jump target decoding ***/
459 /* Immediate address */
460 static inline target_ulong
LI(uint32_t opcode
)
462 return (opcode
>> 0) & 0x03FFFFFC;
465 static inline uint32_t BD(uint32_t opcode
)
467 return (opcode
>> 0) & 0xFFFC;
470 EXTRACT_HELPER(BO
, 21, 5);
471 EXTRACT_HELPER(BI
, 16, 5);
472 /* Absolute/relative address */
473 EXTRACT_HELPER(AA
, 1, 1);
475 EXTRACT_HELPER(LK
, 0, 1);
478 EXTRACT_HELPER(DCM
, 10, 6)
481 EXTRACT_HELPER(RMC
, 9, 2)
483 /* Create a mask between <start> and <end> bits */
484 static inline target_ulong
MASK(uint32_t start
, uint32_t end
)
488 #if defined(TARGET_PPC64)
489 if (likely(start
== 0)) {
490 ret
= UINT64_MAX
<< (63 - end
);
491 } else if (likely(end
== 63)) {
492 ret
= UINT64_MAX
>> start
;
495 if (likely(start
== 0)) {
496 ret
= UINT32_MAX
<< (31 - end
);
497 } else if (likely(end
== 31)) {
498 ret
= UINT32_MAX
>> start
;
502 ret
= (((target_ulong
)(-1ULL)) >> (start
)) ^
503 (((target_ulong
)(-1ULL) >> (end
)) >> 1);
504 if (unlikely(start
> end
))
511 EXTRACT_HELPER_SPLIT(xT
, 0, 1, 21, 5);
512 EXTRACT_HELPER_SPLIT(xS
, 0, 1, 21, 5);
513 EXTRACT_HELPER_SPLIT(xA
, 2, 1, 16, 5);
514 EXTRACT_HELPER_SPLIT(xB
, 1, 1, 11, 5);
515 EXTRACT_HELPER_SPLIT(xC
, 3, 1, 6, 5);
516 EXTRACT_HELPER(DM
, 8, 2);
517 EXTRACT_HELPER(UIM
, 16, 2);
518 EXTRACT_HELPER(SHW
, 8, 2);
519 EXTRACT_HELPER(SP
, 19, 2);
520 /*****************************************************************************/
521 /* PowerPC instructions table */
523 #if defined(DO_PPC_STATISTICS)
524 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
534 .handler = &gen_##name, \
535 .oname = stringify(name), \
537 .oname = stringify(name), \
539 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
550 .handler = &gen_##name, \
551 .oname = stringify(name), \
553 .oname = stringify(name), \
555 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
565 .handler = &gen_##name, \
571 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
581 .handler = &gen_##name, \
583 .oname = stringify(name), \
585 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
596 .handler = &gen_##name, \
598 .oname = stringify(name), \
600 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
610 .handler = &gen_##name, \
616 /* SPR load/store helpers */
617 static inline void gen_load_spr(TCGv t
, int reg
)
619 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUPPCState
, spr
[reg
]));
622 static inline void gen_store_spr(int reg
, TCGv t
)
624 tcg_gen_st_tl(t
, cpu_env
, offsetof(CPUPPCState
, spr
[reg
]));
627 /* Invalid instruction */
628 static void gen_invalid(DisasContext
*ctx
)
630 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
633 static opc_handler_t invalid_handler
= {
634 .inval1
= 0xFFFFFFFF,
635 .inval2
= 0xFFFFFFFF,
638 .handler
= gen_invalid
,
641 /*** Integer comparison ***/
643 static inline void gen_op_cmp(TCGv arg0
, TCGv arg1
, int s
, int crf
)
645 TCGv t0
= tcg_temp_new();
646 TCGv_i32 t1
= tcg_temp_new_i32();
648 tcg_gen_trunc_tl_i32(cpu_crf
[crf
], cpu_so
);
650 tcg_gen_setcond_tl((s ? TCG_COND_LT
: TCG_COND_LTU
), t0
, arg0
, arg1
);
651 tcg_gen_trunc_tl_i32(t1
, t0
);
652 tcg_gen_shli_i32(t1
, t1
, CRF_LT
);
653 tcg_gen_or_i32(cpu_crf
[crf
], cpu_crf
[crf
], t1
);
655 tcg_gen_setcond_tl((s ? TCG_COND_GT
: TCG_COND_GTU
), t0
, arg0
, arg1
);
656 tcg_gen_trunc_tl_i32(t1
, t0
);
657 tcg_gen_shli_i32(t1
, t1
, CRF_GT
);
658 tcg_gen_or_i32(cpu_crf
[crf
], cpu_crf
[crf
], t1
);
660 tcg_gen_setcond_tl(TCG_COND_EQ
, t0
, arg0
, arg1
);
661 tcg_gen_trunc_tl_i32(t1
, t0
);
662 tcg_gen_shli_i32(t1
, t1
, CRF_EQ
);
663 tcg_gen_or_i32(cpu_crf
[crf
], cpu_crf
[crf
], t1
);
666 tcg_temp_free_i32(t1
);
669 static inline void gen_op_cmpi(TCGv arg0
, target_ulong arg1
, int s
, int crf
)
671 TCGv t0
= tcg_const_tl(arg1
);
672 gen_op_cmp(arg0
, t0
, s
, crf
);
676 static inline void gen_op_cmp32(TCGv arg0
, TCGv arg1
, int s
, int crf
)
682 tcg_gen_ext32s_tl(t0
, arg0
);
683 tcg_gen_ext32s_tl(t1
, arg1
);
685 tcg_gen_ext32u_tl(t0
, arg0
);
686 tcg_gen_ext32u_tl(t1
, arg1
);
688 gen_op_cmp(t0
, t1
, s
, crf
);
693 static inline void gen_op_cmpi32(TCGv arg0
, target_ulong arg1
, int s
, int crf
)
695 TCGv t0
= tcg_const_tl(arg1
);
696 gen_op_cmp32(arg0
, t0
, s
, crf
);
700 static inline void gen_set_Rc0(DisasContext
*ctx
, TCGv reg
)
702 if (NARROW_MODE(ctx
)) {
703 gen_op_cmpi32(reg
, 0, 1, 0);
705 gen_op_cmpi(reg
, 0, 1, 0);
710 static void gen_cmp(DisasContext
*ctx
)
712 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
713 gen_op_cmp(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
714 1, crfD(ctx
->opcode
));
716 gen_op_cmp32(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
717 1, crfD(ctx
->opcode
));
722 static void gen_cmpi(DisasContext
*ctx
)
724 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
725 gen_op_cmpi(cpu_gpr
[rA(ctx
->opcode
)], SIMM(ctx
->opcode
),
726 1, crfD(ctx
->opcode
));
728 gen_op_cmpi32(cpu_gpr
[rA(ctx
->opcode
)], SIMM(ctx
->opcode
),
729 1, crfD(ctx
->opcode
));
734 static void gen_cmpl(DisasContext
*ctx
)
736 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
737 gen_op_cmp(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
738 0, crfD(ctx
->opcode
));
740 gen_op_cmp32(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
741 0, crfD(ctx
->opcode
));
746 static void gen_cmpli(DisasContext
*ctx
)
748 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
749 gen_op_cmpi(cpu_gpr
[rA(ctx
->opcode
)], UIMM(ctx
->opcode
),
750 0, crfD(ctx
->opcode
));
752 gen_op_cmpi32(cpu_gpr
[rA(ctx
->opcode
)], UIMM(ctx
->opcode
),
753 0, crfD(ctx
->opcode
));
757 /* isel (PowerPC 2.03 specification) */
758 static void gen_isel(DisasContext
*ctx
)
760 uint32_t bi
= rC(ctx
->opcode
);
761 uint32_t mask
= 0x08 >> (bi
& 0x03);
762 TCGv t0
= tcg_temp_new();
765 tcg_gen_extu_i32_tl(t0
, cpu_crf
[bi
>> 2]);
766 tcg_gen_andi_tl(t0
, t0
, mask
);
768 zr
= tcg_const_tl(0);
769 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr
[rD(ctx
->opcode
)], t0
, zr
,
770 rA(ctx
->opcode
) ? cpu_gpr
[rA(ctx
->opcode
)] : zr
,
771 cpu_gpr
[rB(ctx
->opcode
)]);
776 /* cmpb: PowerPC 2.05 specification */
777 static void gen_cmpb(DisasContext
*ctx
)
779 gen_helper_cmpb(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)],
780 cpu_gpr
[rB(ctx
->opcode
)]);
783 /*** Integer arithmetic ***/
785 static inline void gen_op_arith_compute_ov(DisasContext
*ctx
, TCGv arg0
,
786 TCGv arg1
, TCGv arg2
, int sub
)
788 TCGv t0
= tcg_temp_new();
790 tcg_gen_xor_tl(cpu_ov
, arg0
, arg2
);
791 tcg_gen_xor_tl(t0
, arg1
, arg2
);
793 tcg_gen_and_tl(cpu_ov
, cpu_ov
, t0
);
795 tcg_gen_andc_tl(cpu_ov
, cpu_ov
, t0
);
798 if (NARROW_MODE(ctx
)) {
799 tcg_gen_ext32s_tl(cpu_ov
, cpu_ov
);
801 tcg_gen_shri_tl(cpu_ov
, cpu_ov
, TARGET_LONG_BITS
- 1);
802 tcg_gen_or_tl(cpu_so
, cpu_so
, cpu_ov
);
805 /* Common add function */
806 static inline void gen_op_arith_add(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
807 TCGv arg2
, bool add_ca
, bool compute_ca
,
808 bool compute_ov
, bool compute_rc0
)
812 if (compute_ca
|| compute_ov
) {
817 if (NARROW_MODE(ctx
)) {
818 /* Caution: a non-obvious corner case of the spec is that we
819 must produce the *entire* 64-bit addition, but produce the
820 carry into bit 32. */
821 TCGv t1
= tcg_temp_new();
822 tcg_gen_xor_tl(t1
, arg1
, arg2
); /* add without carry */
823 tcg_gen_add_tl(t0
, arg1
, arg2
);
825 tcg_gen_add_tl(t0
, t0
, cpu_ca
);
827 tcg_gen_xor_tl(cpu_ca
, t0
, t1
); /* bits changed w/ carry */
829 tcg_gen_shri_tl(cpu_ca
, cpu_ca
, 32); /* extract bit 32 */
830 tcg_gen_andi_tl(cpu_ca
, cpu_ca
, 1);
832 TCGv zero
= tcg_const_tl(0);
834 tcg_gen_add2_tl(t0
, cpu_ca
, arg1
, zero
, cpu_ca
, zero
);
835 tcg_gen_add2_tl(t0
, cpu_ca
, t0
, cpu_ca
, arg2
, zero
);
837 tcg_gen_add2_tl(t0
, cpu_ca
, arg1
, zero
, arg2
, zero
);
842 tcg_gen_add_tl(t0
, arg1
, arg2
);
844 tcg_gen_add_tl(t0
, t0
, cpu_ca
);
849 gen_op_arith_compute_ov(ctx
, t0
, arg1
, arg2
, 0);
851 if (unlikely(compute_rc0
)) {
852 gen_set_Rc0(ctx
, t0
);
855 if (!TCGV_EQUAL(t0
, ret
)) {
856 tcg_gen_mov_tl(ret
, t0
);
860 /* Add functions with two operands */
861 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
862 static void glue(gen_, name)(DisasContext *ctx) \
864 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
865 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
866 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
868 /* Add functions with one operand and one immediate */
869 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
870 add_ca, compute_ca, compute_ov) \
871 static void glue(gen_, name)(DisasContext *ctx) \
873 TCGv t0 = tcg_const_tl(const_val); \
874 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
875 cpu_gpr[rA(ctx->opcode)], t0, \
876 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
880 /* add add. addo addo. */
881 GEN_INT_ARITH_ADD(add
, 0x08, 0, 0, 0)
882 GEN_INT_ARITH_ADD(addo
, 0x18, 0, 0, 1)
883 /* addc addc. addco addco. */
884 GEN_INT_ARITH_ADD(addc
, 0x00, 0, 1, 0)
885 GEN_INT_ARITH_ADD(addco
, 0x10, 0, 1, 1)
886 /* adde adde. addeo addeo. */
887 GEN_INT_ARITH_ADD(adde
, 0x04, 1, 1, 0)
888 GEN_INT_ARITH_ADD(addeo
, 0x14, 1, 1, 1)
889 /* addme addme. addmeo addmeo. */
890 GEN_INT_ARITH_ADD_CONST(addme
, 0x07, -1LL, 1, 1, 0)
891 GEN_INT_ARITH_ADD_CONST(addmeo
, 0x17, -1LL, 1, 1, 1)
892 /* addze addze. addzeo addzeo.*/
893 GEN_INT_ARITH_ADD_CONST(addze
, 0x06, 0, 1, 1, 0)
894 GEN_INT_ARITH_ADD_CONST(addzeo
, 0x16, 0, 1, 1, 1)
896 static void gen_addi(DisasContext
*ctx
)
898 target_long simm
= SIMM(ctx
->opcode
);
900 if (rA(ctx
->opcode
) == 0) {
902 tcg_gen_movi_tl(cpu_gpr
[rD(ctx
->opcode
)], simm
);
904 tcg_gen_addi_tl(cpu_gpr
[rD(ctx
->opcode
)],
905 cpu_gpr
[rA(ctx
->opcode
)], simm
);
909 static inline void gen_op_addic(DisasContext
*ctx
, bool compute_rc0
)
911 TCGv c
= tcg_const_tl(SIMM(ctx
->opcode
));
912 gen_op_arith_add(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
913 c
, 0, 1, 0, compute_rc0
);
917 static void gen_addic(DisasContext
*ctx
)
919 gen_op_addic(ctx
, 0);
922 static void gen_addic_(DisasContext
*ctx
)
924 gen_op_addic(ctx
, 1);
928 static void gen_addis(DisasContext
*ctx
)
930 target_long simm
= SIMM(ctx
->opcode
);
932 if (rA(ctx
->opcode
) == 0) {
934 tcg_gen_movi_tl(cpu_gpr
[rD(ctx
->opcode
)], simm
<< 16);
936 tcg_gen_addi_tl(cpu_gpr
[rD(ctx
->opcode
)],
937 cpu_gpr
[rA(ctx
->opcode
)], simm
<< 16);
941 static inline void gen_op_arith_divw(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
942 TCGv arg2
, int sign
, int compute_ov
)
944 TCGLabel
*l1
= gen_new_label();
945 TCGLabel
*l2
= gen_new_label();
946 TCGv_i32 t0
= tcg_temp_local_new_i32();
947 TCGv_i32 t1
= tcg_temp_local_new_i32();
949 tcg_gen_trunc_tl_i32(t0
, arg1
);
950 tcg_gen_trunc_tl_i32(t1
, arg2
);
951 tcg_gen_brcondi_i32(TCG_COND_EQ
, t1
, 0, l1
);
953 TCGLabel
*l3
= gen_new_label();
954 tcg_gen_brcondi_i32(TCG_COND_NE
, t1
, -1, l3
);
955 tcg_gen_brcondi_i32(TCG_COND_EQ
, t0
, INT32_MIN
, l1
);
957 tcg_gen_div_i32(t0
, t0
, t1
);
959 tcg_gen_divu_i32(t0
, t0
, t1
);
962 tcg_gen_movi_tl(cpu_ov
, 0);
967 tcg_gen_sari_i32(t0
, t0
, 31);
969 tcg_gen_movi_i32(t0
, 0);
972 tcg_gen_movi_tl(cpu_ov
, 1);
973 tcg_gen_movi_tl(cpu_so
, 1);
976 tcg_gen_extu_i32_tl(ret
, t0
);
977 tcg_temp_free_i32(t0
);
978 tcg_temp_free_i32(t1
);
979 if (unlikely(Rc(ctx
->opcode
) != 0))
980 gen_set_Rc0(ctx
, ret
);
983 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
984 static void glue(gen_, name)(DisasContext *ctx) \
986 gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
987 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
990 /* divwu divwu. divwuo divwuo. */
991 GEN_INT_ARITH_DIVW(divwu
, 0x0E, 0, 0);
992 GEN_INT_ARITH_DIVW(divwuo
, 0x1E, 0, 1);
993 /* divw divw. divwo divwo. */
994 GEN_INT_ARITH_DIVW(divw
, 0x0F, 1, 0);
995 GEN_INT_ARITH_DIVW(divwo
, 0x1F, 1, 1);
997 /* div[wd]eu[o][.] */
998 #define GEN_DIVE(name, hlpr, compute_ov) \
999 static void gen_##name(DisasContext *ctx) \
1001 TCGv_i32 t0 = tcg_const_i32(compute_ov); \
1002 gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
1003 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
1004 tcg_temp_free_i32(t0); \
1005 if (unlikely(Rc(ctx->opcode) != 0)) { \
1006 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
1010 GEN_DIVE(divweu
, divweu
, 0);
1011 GEN_DIVE(divweuo
, divweu
, 1);
1012 GEN_DIVE(divwe
, divwe
, 0);
1013 GEN_DIVE(divweo
, divwe
, 1);
1015 #if defined(TARGET_PPC64)
1016 static inline void gen_op_arith_divd(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
1017 TCGv arg2
, int sign
, int compute_ov
)
1019 TCGLabel
*l1
= gen_new_label();
1020 TCGLabel
*l2
= gen_new_label();
1022 tcg_gen_brcondi_i64(TCG_COND_EQ
, arg2
, 0, l1
);
1024 TCGLabel
*l3
= gen_new_label();
1025 tcg_gen_brcondi_i64(TCG_COND_NE
, arg2
, -1, l3
);
1026 tcg_gen_brcondi_i64(TCG_COND_EQ
, arg1
, INT64_MIN
, l1
);
1028 tcg_gen_div_i64(ret
, arg1
, arg2
);
1030 tcg_gen_divu_i64(ret
, arg1
, arg2
);
1033 tcg_gen_movi_tl(cpu_ov
, 0);
1038 tcg_gen_sari_i64(ret
, arg1
, 63);
1040 tcg_gen_movi_i64(ret
, 0);
1043 tcg_gen_movi_tl(cpu_ov
, 1);
1044 tcg_gen_movi_tl(cpu_so
, 1);
1047 if (unlikely(Rc(ctx
->opcode
) != 0))
1048 gen_set_Rc0(ctx
, ret
);
1050 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
1051 static void glue(gen_, name)(DisasContext *ctx) \
1053 gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
1054 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1055 sign, compute_ov); \
1057 /* divwu divwu. divwuo divwuo. */
1058 GEN_INT_ARITH_DIVD(divdu
, 0x0E, 0, 0);
1059 GEN_INT_ARITH_DIVD(divduo
, 0x1E, 0, 1);
1060 /* divw divw. divwo divwo. */
1061 GEN_INT_ARITH_DIVD(divd
, 0x0F, 1, 0);
1062 GEN_INT_ARITH_DIVD(divdo
, 0x1F, 1, 1);
1064 GEN_DIVE(divdeu
, divdeu
, 0);
1065 GEN_DIVE(divdeuo
, divdeu
, 1);
1066 GEN_DIVE(divde
, divde
, 0);
1067 GEN_DIVE(divdeo
, divde
, 1);
1071 static void gen_mulhw(DisasContext
*ctx
)
1073 TCGv_i32 t0
= tcg_temp_new_i32();
1074 TCGv_i32 t1
= tcg_temp_new_i32();
1076 tcg_gen_trunc_tl_i32(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1077 tcg_gen_trunc_tl_i32(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1078 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
1079 tcg_gen_extu_i32_tl(cpu_gpr
[rD(ctx
->opcode
)], t1
);
1080 tcg_temp_free_i32(t0
);
1081 tcg_temp_free_i32(t1
);
1082 if (unlikely(Rc(ctx
->opcode
) != 0))
1083 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1086 /* mulhwu mulhwu. */
1087 static void gen_mulhwu(DisasContext
*ctx
)
1089 TCGv_i32 t0
= tcg_temp_new_i32();
1090 TCGv_i32 t1
= tcg_temp_new_i32();
1092 tcg_gen_trunc_tl_i32(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1093 tcg_gen_trunc_tl_i32(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1094 tcg_gen_mulu2_i32(t0
, t1
, t0
, t1
);
1095 tcg_gen_extu_i32_tl(cpu_gpr
[rD(ctx
->opcode
)], t1
);
1096 tcg_temp_free_i32(t0
);
1097 tcg_temp_free_i32(t1
);
1098 if (unlikely(Rc(ctx
->opcode
) != 0))
1099 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1103 static void gen_mullw(DisasContext
*ctx
)
1105 #if defined(TARGET_PPC64)
1107 t0
= tcg_temp_new_i64();
1108 t1
= tcg_temp_new_i64();
1109 tcg_gen_ext32s_tl(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1110 tcg_gen_ext32s_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1111 tcg_gen_mul_i64(cpu_gpr
[rD(ctx
->opcode
)], t0
, t1
);
1115 tcg_gen_mul_i32(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1116 cpu_gpr
[rB(ctx
->opcode
)]);
1118 if (unlikely(Rc(ctx
->opcode
) != 0))
1119 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1122 /* mullwo mullwo. */
1123 static void gen_mullwo(DisasContext
*ctx
)
1125 TCGv_i32 t0
= tcg_temp_new_i32();
1126 TCGv_i32 t1
= tcg_temp_new_i32();
1128 tcg_gen_trunc_tl_i32(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1129 tcg_gen_trunc_tl_i32(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1130 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
1131 #if defined(TARGET_PPC64)
1132 tcg_gen_concat_i32_i64(cpu_gpr
[rD(ctx
->opcode
)], t0
, t1
);
1134 tcg_gen_mov_i32(cpu_gpr
[rD(ctx
->opcode
)], t0
);
1137 tcg_gen_sari_i32(t0
, t0
, 31);
1138 tcg_gen_setcond_i32(TCG_COND_NE
, t0
, t0
, t1
);
1139 tcg_gen_extu_i32_tl(cpu_ov
, t0
);
1140 tcg_gen_or_tl(cpu_so
, cpu_so
, cpu_ov
);
1142 tcg_temp_free_i32(t0
);
1143 tcg_temp_free_i32(t1
);
1144 if (unlikely(Rc(ctx
->opcode
) != 0))
1145 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1149 static void gen_mulli(DisasContext
*ctx
)
1151 tcg_gen_muli_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1155 #if defined(TARGET_PPC64)
1157 static void gen_mulhd(DisasContext
*ctx
)
1159 TCGv lo
= tcg_temp_new();
1160 tcg_gen_muls2_tl(lo
, cpu_gpr
[rD(ctx
->opcode
)],
1161 cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1163 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1164 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1168 /* mulhdu mulhdu. */
1169 static void gen_mulhdu(DisasContext
*ctx
)
1171 TCGv lo
= tcg_temp_new();
1172 tcg_gen_mulu2_tl(lo
, cpu_gpr
[rD(ctx
->opcode
)],
1173 cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1175 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1176 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1181 static void gen_mulld(DisasContext
*ctx
)
1183 tcg_gen_mul_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1184 cpu_gpr
[rB(ctx
->opcode
)]);
1185 if (unlikely(Rc(ctx
->opcode
) != 0))
1186 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1189 /* mulldo mulldo. */
1190 static void gen_mulldo(DisasContext
*ctx
)
1192 TCGv_i64 t0
= tcg_temp_new_i64();
1193 TCGv_i64 t1
= tcg_temp_new_i64();
1195 tcg_gen_muls2_i64(t0
, t1
, cpu_gpr
[rA(ctx
->opcode
)],
1196 cpu_gpr
[rB(ctx
->opcode
)]);
1197 tcg_gen_mov_i64(cpu_gpr
[rD(ctx
->opcode
)], t0
);
1199 tcg_gen_sari_i64(t0
, t0
, 63);
1200 tcg_gen_setcond_i64(TCG_COND_NE
, cpu_ov
, t0
, t1
);
1201 tcg_gen_or_tl(cpu_so
, cpu_so
, cpu_ov
);
1203 tcg_temp_free_i64(t0
);
1204 tcg_temp_free_i64(t1
);
1206 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1207 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1212 /* Common subf function */
1213 static inline void gen_op_arith_subf(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
1214 TCGv arg2
, bool add_ca
, bool compute_ca
,
1215 bool compute_ov
, bool compute_rc0
)
1219 if (compute_ca
|| compute_ov
) {
1220 t0
= tcg_temp_new();
1224 /* dest = ~arg1 + arg2 [+ ca]. */
1225 if (NARROW_MODE(ctx
)) {
1226 /* Caution: a non-obvious corner case of the spec is that we
1227 must produce the *entire* 64-bit addition, but produce the
1228 carry into bit 32. */
1229 TCGv inv1
= tcg_temp_new();
1230 TCGv t1
= tcg_temp_new();
1231 tcg_gen_not_tl(inv1
, arg1
);
1233 tcg_gen_add_tl(t0
, arg2
, cpu_ca
);
1235 tcg_gen_addi_tl(t0
, arg2
, 1);
1237 tcg_gen_xor_tl(t1
, arg2
, inv1
); /* add without carry */
1238 tcg_gen_add_tl(t0
, t0
, inv1
);
1239 tcg_temp_free(inv1
);
1240 tcg_gen_xor_tl(cpu_ca
, t0
, t1
); /* bits changes w/ carry */
1242 tcg_gen_shri_tl(cpu_ca
, cpu_ca
, 32); /* extract bit 32 */
1243 tcg_gen_andi_tl(cpu_ca
, cpu_ca
, 1);
1244 } else if (add_ca
) {
1245 TCGv zero
, inv1
= tcg_temp_new();
1246 tcg_gen_not_tl(inv1
, arg1
);
1247 zero
= tcg_const_tl(0);
1248 tcg_gen_add2_tl(t0
, cpu_ca
, arg2
, zero
, cpu_ca
, zero
);
1249 tcg_gen_add2_tl(t0
, cpu_ca
, t0
, cpu_ca
, inv1
, zero
);
1250 tcg_temp_free(zero
);
1251 tcg_temp_free(inv1
);
1253 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_ca
, arg2
, arg1
);
1254 tcg_gen_sub_tl(t0
, arg2
, arg1
);
1256 } else if (add_ca
) {
1257 /* Since we're ignoring carry-out, we can simplify the
1258 standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
1259 tcg_gen_sub_tl(t0
, arg2
, arg1
);
1260 tcg_gen_add_tl(t0
, t0
, cpu_ca
);
1261 tcg_gen_subi_tl(t0
, t0
, 1);
1263 tcg_gen_sub_tl(t0
, arg2
, arg1
);
1267 gen_op_arith_compute_ov(ctx
, t0
, arg1
, arg2
, 1);
1269 if (unlikely(compute_rc0
)) {
1270 gen_set_Rc0(ctx
, t0
);
1273 if (!TCGV_EQUAL(t0
, ret
)) {
1274 tcg_gen_mov_tl(ret
, t0
);
1278 /* Sub functions with Two operands functions */
1279 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
1280 static void glue(gen_, name)(DisasContext *ctx) \
1282 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
1283 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1284 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
1286 /* Sub functions with one operand and one immediate */
1287 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
1288 add_ca, compute_ca, compute_ov) \
1289 static void glue(gen_, name)(DisasContext *ctx) \
1291 TCGv t0 = tcg_const_tl(const_val); \
1292 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
1293 cpu_gpr[rA(ctx->opcode)], t0, \
1294 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
1295 tcg_temp_free(t0); \
1297 /* subf subf. subfo subfo. */
1298 GEN_INT_ARITH_SUBF(subf
, 0x01, 0, 0, 0)
1299 GEN_INT_ARITH_SUBF(subfo
, 0x11, 0, 0, 1)
1300 /* subfc subfc. subfco subfco. */
1301 GEN_INT_ARITH_SUBF(subfc
, 0x00, 0, 1, 0)
1302 GEN_INT_ARITH_SUBF(subfco
, 0x10, 0, 1, 1)
1303 /* subfe subfe. subfeo subfo. */
1304 GEN_INT_ARITH_SUBF(subfe
, 0x04, 1, 1, 0)
1305 GEN_INT_ARITH_SUBF(subfeo
, 0x14, 1, 1, 1)
1306 /* subfme subfme. subfmeo subfmeo. */
1307 GEN_INT_ARITH_SUBF_CONST(subfme
, 0x07, -1LL, 1, 1, 0)
1308 GEN_INT_ARITH_SUBF_CONST(subfmeo
, 0x17, -1LL, 1, 1, 1)
1309 /* subfze subfze. subfzeo subfzeo.*/
1310 GEN_INT_ARITH_SUBF_CONST(subfze
, 0x06, 0, 1, 1, 0)
1311 GEN_INT_ARITH_SUBF_CONST(subfzeo
, 0x16, 0, 1, 1, 1)
1314 static void gen_subfic(DisasContext
*ctx
)
1316 TCGv c
= tcg_const_tl(SIMM(ctx
->opcode
));
1317 gen_op_arith_subf(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1322 /* neg neg. nego nego. */
1323 static inline void gen_op_arith_neg(DisasContext
*ctx
, bool compute_ov
)
1325 TCGv zero
= tcg_const_tl(0);
1326 gen_op_arith_subf(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1327 zero
, 0, 0, compute_ov
, Rc(ctx
->opcode
));
1328 tcg_temp_free(zero
);
1331 static void gen_neg(DisasContext
*ctx
)
1333 gen_op_arith_neg(ctx
, 0);
1336 static void gen_nego(DisasContext
*ctx
)
1338 gen_op_arith_neg(ctx
, 1);
1341 /*** Integer logical ***/
1342 #define GEN_LOGICAL2(name, tcg_op, opc, type) \
1343 static void glue(gen_, name)(DisasContext *ctx) \
1345 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
1346 cpu_gpr[rB(ctx->opcode)]); \
1347 if (unlikely(Rc(ctx->opcode) != 0)) \
1348 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1351 #define GEN_LOGICAL1(name, tcg_op, opc, type) \
1352 static void glue(gen_, name)(DisasContext *ctx) \
1354 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
1355 if (unlikely(Rc(ctx->opcode) != 0)) \
1356 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1360 GEN_LOGICAL2(and, tcg_gen_and_tl
, 0x00, PPC_INTEGER
);
1362 GEN_LOGICAL2(andc
, tcg_gen_andc_tl
, 0x01, PPC_INTEGER
);
1365 static void gen_andi_(DisasContext
*ctx
)
1367 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], UIMM(ctx
->opcode
));
1368 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1372 static void gen_andis_(DisasContext
*ctx
)
1374 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], UIMM(ctx
->opcode
) << 16);
1375 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1379 static void gen_cntlzw(DisasContext
*ctx
)
1381 gen_helper_cntlzw(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1382 if (unlikely(Rc(ctx
->opcode
) != 0))
1383 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1386 GEN_LOGICAL2(eqv
, tcg_gen_eqv_tl
, 0x08, PPC_INTEGER
);
1387 /* extsb & extsb. */
1388 GEN_LOGICAL1(extsb
, tcg_gen_ext8s_tl
, 0x1D, PPC_INTEGER
);
1389 /* extsh & extsh. */
1390 GEN_LOGICAL1(extsh
, tcg_gen_ext16s_tl
, 0x1C, PPC_INTEGER
);
1392 GEN_LOGICAL2(nand
, tcg_gen_nand_tl
, 0x0E, PPC_INTEGER
);
1394 GEN_LOGICAL2(nor
, tcg_gen_nor_tl
, 0x03, PPC_INTEGER
);
1396 #if defined(TARGET_PPC64)
1397 static void gen_pause(DisasContext
*ctx
)
1399 TCGv_i32 t0
= tcg_const_i32(0);
1400 tcg_gen_st_i32(t0
, cpu_env
,
1401 -offsetof(PowerPCCPU
, env
) + offsetof(CPUState
, halted
));
1402 tcg_temp_free_i32(t0
);
1404 /* Stop translation, this gives other CPUs a chance to run */
1405 gen_exception_err(ctx
, EXCP_HLT
, 1);
1407 #endif /* defined(TARGET_PPC64) */
1410 static void gen_or(DisasContext
*ctx
)
1414 rs
= rS(ctx
->opcode
);
1415 ra
= rA(ctx
->opcode
);
1416 rb
= rB(ctx
->opcode
);
1417 /* Optimisation for mr. ri case */
1418 if (rs
!= ra
|| rs
!= rb
) {
1420 tcg_gen_or_tl(cpu_gpr
[ra
], cpu_gpr
[rs
], cpu_gpr
[rb
]);
1422 tcg_gen_mov_tl(cpu_gpr
[ra
], cpu_gpr
[rs
]);
1423 if (unlikely(Rc(ctx
->opcode
) != 0))
1424 gen_set_Rc0(ctx
, cpu_gpr
[ra
]);
1425 } else if (unlikely(Rc(ctx
->opcode
) != 0)) {
1426 gen_set_Rc0(ctx
, cpu_gpr
[rs
]);
1427 #if defined(TARGET_PPC64)
1433 /* Set process priority to low */
1437 /* Set process priority to medium-low */
1441 /* Set process priority to normal */
1444 #if !defined(CONFIG_USER_ONLY)
1447 /* Set process priority to very low */
1453 /* Set process priority to medium-hight */
1459 /* Set process priority to high */
1464 if (ctx
->hv
&& !ctx
->pr
) {
1465 /* Set process priority to very high */
1475 TCGv t0
= tcg_temp_new();
1476 gen_load_spr(t0
, SPR_PPR
);
1477 tcg_gen_andi_tl(t0
, t0
, ~0x001C000000000000ULL
);
1478 tcg_gen_ori_tl(t0
, t0
, ((uint64_t)prio
) << 50);
1479 gen_store_spr(SPR_PPR
, t0
);
1481 /* Pause us out of TCG otherwise spin loops with smt_low
1482 * eat too much CPU and the kernel hangs
1490 GEN_LOGICAL2(orc
, tcg_gen_orc_tl
, 0x0C, PPC_INTEGER
);
1493 static void gen_xor(DisasContext
*ctx
)
1495 /* Optimisation for "set to zero" case */
1496 if (rS(ctx
->opcode
) != rB(ctx
->opcode
))
1497 tcg_gen_xor_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1499 tcg_gen_movi_tl(cpu_gpr
[rA(ctx
->opcode
)], 0);
1500 if (unlikely(Rc(ctx
->opcode
) != 0))
1501 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1505 static void gen_ori(DisasContext
*ctx
)
1507 target_ulong uimm
= UIMM(ctx
->opcode
);
1509 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1512 tcg_gen_ori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
);
1516 static void gen_oris(DisasContext
*ctx
)
1518 target_ulong uimm
= UIMM(ctx
->opcode
);
1520 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1524 tcg_gen_ori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
<< 16);
1528 static void gen_xori(DisasContext
*ctx
)
1530 target_ulong uimm
= UIMM(ctx
->opcode
);
1532 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1536 tcg_gen_xori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
);
1540 static void gen_xoris(DisasContext
*ctx
)
1542 target_ulong uimm
= UIMM(ctx
->opcode
);
1544 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1548 tcg_gen_xori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
<< 16);
1551 /* popcntb : PowerPC 2.03 specification */
1552 static void gen_popcntb(DisasContext
*ctx
)
1554 gen_helper_popcntb(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1557 static void gen_popcntw(DisasContext
*ctx
)
1559 gen_helper_popcntw(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1562 #if defined(TARGET_PPC64)
1563 /* popcntd: PowerPC 2.06 specification */
1564 static void gen_popcntd(DisasContext
*ctx
)
1566 gen_helper_popcntd(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1570 /* prtyw: PowerPC 2.05 specification */
1571 static void gen_prtyw(DisasContext
*ctx
)
1573 TCGv ra
= cpu_gpr
[rA(ctx
->opcode
)];
1574 TCGv rs
= cpu_gpr
[rS(ctx
->opcode
)];
1575 TCGv t0
= tcg_temp_new();
1576 tcg_gen_shri_tl(t0
, rs
, 16);
1577 tcg_gen_xor_tl(ra
, rs
, t0
);
1578 tcg_gen_shri_tl(t0
, ra
, 8);
1579 tcg_gen_xor_tl(ra
, ra
, t0
);
1580 tcg_gen_andi_tl(ra
, ra
, (target_ulong
)0x100000001ULL
);
1584 #if defined(TARGET_PPC64)
1585 /* prtyd: PowerPC 2.05 specification */
1586 static void gen_prtyd(DisasContext
*ctx
)
1588 TCGv ra
= cpu_gpr
[rA(ctx
->opcode
)];
1589 TCGv rs
= cpu_gpr
[rS(ctx
->opcode
)];
1590 TCGv t0
= tcg_temp_new();
1591 tcg_gen_shri_tl(t0
, rs
, 32);
1592 tcg_gen_xor_tl(ra
, rs
, t0
);
1593 tcg_gen_shri_tl(t0
, ra
, 16);
1594 tcg_gen_xor_tl(ra
, ra
, t0
);
1595 tcg_gen_shri_tl(t0
, ra
, 8);
1596 tcg_gen_xor_tl(ra
, ra
, t0
);
1597 tcg_gen_andi_tl(ra
, ra
, 1);
1602 #if defined(TARGET_PPC64)
1604 static void gen_bpermd(DisasContext
*ctx
)
1606 gen_helper_bpermd(cpu_gpr
[rA(ctx
->opcode
)],
1607 cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1611 #if defined(TARGET_PPC64)
1612 /* extsw & extsw. */
1613 GEN_LOGICAL1(extsw
, tcg_gen_ext32s_tl
, 0x1E, PPC_64B
);
1616 static void gen_cntlzd(DisasContext
*ctx
)
1618 gen_helper_cntlzd(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1619 if (unlikely(Rc(ctx
->opcode
) != 0))
1620 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1624 /*** Integer rotate ***/
1626 /* rlwimi & rlwimi. */
1627 static void gen_rlwimi(DisasContext
*ctx
)
1629 TCGv t_ra
= cpu_gpr
[rA(ctx
->opcode
)];
1630 TCGv t_rs
= cpu_gpr
[rS(ctx
->opcode
)];
1631 uint32_t sh
= SH(ctx
->opcode
);
1632 uint32_t mb
= MB(ctx
->opcode
);
1633 uint32_t me
= ME(ctx
->opcode
);
1635 if (sh
== (31-me
) && mb
<= me
) {
1636 tcg_gen_deposit_tl(t_ra
, t_ra
, t_rs
, sh
, me
- mb
+ 1);
1642 #if defined(TARGET_PPC64)
1646 mask
= MASK(mb
, me
);
1648 t0
= tcg_temp_new_i32();
1649 t1
= tcg_temp_new();
1650 tcg_gen_trunc_tl_i32(t0
, t_rs
);
1651 tcg_gen_rotli_i32(t0
, t0
, sh
);
1652 tcg_gen_extu_i32_tl(t1
, t0
);
1653 tcg_temp_free_i32(t0
);
1655 tcg_gen_andi_tl(t1
, t1
, mask
);
1656 tcg_gen_andi_tl(t_ra
, t_ra
, ~mask
);
1657 tcg_gen_or_tl(t_ra
, t_ra
, t1
);
1660 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1661 gen_set_Rc0(ctx
, t_ra
);
1665 /* rlwinm & rlwinm. */
1666 static void gen_rlwinm(DisasContext
*ctx
)
1668 TCGv t_ra
= cpu_gpr
[rA(ctx
->opcode
)];
1669 TCGv t_rs
= cpu_gpr
[rS(ctx
->opcode
)];
1670 uint32_t sh
= SH(ctx
->opcode
);
1671 uint32_t mb
= MB(ctx
->opcode
);
1672 uint32_t me
= ME(ctx
->opcode
);
1674 if (mb
== 0 && me
== (31 - sh
)) {
1675 tcg_gen_shli_tl(t_ra
, t_rs
, sh
);
1676 tcg_gen_ext32u_tl(t_ra
, t_ra
);
1677 } else if (sh
!= 0 && me
== 31 && sh
== (32 - mb
)) {
1678 tcg_gen_ext32u_tl(t_ra
, t_rs
);
1679 tcg_gen_shri_tl(t_ra
, t_ra
, mb
);
1681 #if defined(TARGET_PPC64)
1686 tcg_gen_andi_tl(t_ra
, t_rs
, MASK(mb
, me
));
1688 TCGv_i32 t0
= tcg_temp_new_i32();
1690 tcg_gen_trunc_tl_i32(t0
, t_rs
);
1691 tcg_gen_rotli_i32(t0
, t0
, sh
);
1692 tcg_gen_andi_i32(t0
, t0
, MASK(mb
, me
));
1693 tcg_gen_extu_i32_tl(t_ra
, t0
);
1694 tcg_temp_free_i32(t0
);
1697 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1698 gen_set_Rc0(ctx
, t_ra
);
1702 /* rlwnm & rlwnm. */
1703 static void gen_rlwnm(DisasContext
*ctx
)
1705 TCGv t_ra
= cpu_gpr
[rA(ctx
->opcode
)];
1706 TCGv t_rs
= cpu_gpr
[rS(ctx
->opcode
)];
1707 TCGv t_rb
= cpu_gpr
[rB(ctx
->opcode
)];
1708 uint32_t mb
= MB(ctx
->opcode
);
1709 uint32_t me
= ME(ctx
->opcode
);
1712 #if defined(TARGET_PPC64)
1717 t0
= tcg_temp_new_i32();
1718 t1
= tcg_temp_new_i32();
1719 tcg_gen_trunc_tl_i32(t0
, t_rb
);
1720 tcg_gen_trunc_tl_i32(t1
, t_rs
);
1721 tcg_gen_andi_i32(t0
, t0
, 0x1f);
1722 tcg_gen_rotl_i32(t1
, t1
, t0
);
1723 tcg_temp_free_i32(t0
);
1725 tcg_gen_andi_i32(t1
, t1
, MASK(mb
, me
));
1726 tcg_gen_extu_i32_tl(t_ra
, t1
);
1727 tcg_temp_free_i32(t1
);
1729 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1730 gen_set_Rc0(ctx
, t_ra
);
1734 #if defined(TARGET_PPC64)
1735 #define GEN_PPC64_R2(name, opc1, opc2) \
1736 static void glue(gen_, name##0)(DisasContext *ctx) \
1738 gen_##name(ctx, 0); \
1741 static void glue(gen_, name##1)(DisasContext *ctx) \
1743 gen_##name(ctx, 1); \
1745 #define GEN_PPC64_R4(name, opc1, opc2) \
1746 static void glue(gen_, name##0)(DisasContext *ctx) \
1748 gen_##name(ctx, 0, 0); \
1751 static void glue(gen_, name##1)(DisasContext *ctx) \
1753 gen_##name(ctx, 0, 1); \
1756 static void glue(gen_, name##2)(DisasContext *ctx) \
1758 gen_##name(ctx, 1, 0); \
1761 static void glue(gen_, name##3)(DisasContext *ctx) \
1763 gen_##name(ctx, 1, 1); \
1766 static void gen_rldinm(DisasContext
*ctx
, int mb
, int me
, int sh
)
1768 TCGv t_ra
= cpu_gpr
[rA(ctx
->opcode
)];
1769 TCGv t_rs
= cpu_gpr
[rS(ctx
->opcode
)];
1771 if (sh
!= 0 && mb
== 0 && me
== (63 - sh
)) {
1772 tcg_gen_shli_tl(t_ra
, t_rs
, sh
);
1773 } else if (sh
!= 0 && me
== 63 && sh
== (64 - mb
)) {
1774 tcg_gen_shri_tl(t_ra
, t_rs
, mb
);
1776 tcg_gen_rotli_tl(t_ra
, t_rs
, sh
);
1777 tcg_gen_andi_tl(t_ra
, t_ra
, MASK(mb
, me
));
1779 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1780 gen_set_Rc0(ctx
, t_ra
);
1784 /* rldicl - rldicl. */
1785 static inline void gen_rldicl(DisasContext
*ctx
, int mbn
, int shn
)
1789 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1790 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1791 gen_rldinm(ctx
, mb
, 63, sh
);
1793 GEN_PPC64_R4(rldicl
, 0x1E, 0x00);
1795 /* rldicr - rldicr. */
1796 static inline void gen_rldicr(DisasContext
*ctx
, int men
, int shn
)
1800 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1801 me
= MB(ctx
->opcode
) | (men
<< 5);
1802 gen_rldinm(ctx
, 0, me
, sh
);
1804 GEN_PPC64_R4(rldicr
, 0x1E, 0x02);
1806 /* rldic - rldic. */
1807 static inline void gen_rldic(DisasContext
*ctx
, int mbn
, int shn
)
1811 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1812 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1813 gen_rldinm(ctx
, mb
, 63 - sh
, sh
);
1815 GEN_PPC64_R4(rldic
, 0x1E, 0x04);
1817 static void gen_rldnm(DisasContext
*ctx
, int mb
, int me
)
1819 TCGv t_ra
= cpu_gpr
[rA(ctx
->opcode
)];
1820 TCGv t_rs
= cpu_gpr
[rS(ctx
->opcode
)];
1821 TCGv t_rb
= cpu_gpr
[rB(ctx
->opcode
)];
1824 t0
= tcg_temp_new();
1825 tcg_gen_andi_tl(t0
, t_rb
, 0x3f);
1826 tcg_gen_rotl_tl(t_ra
, t_rs
, t0
);
1829 tcg_gen_andi_tl(t_ra
, t_ra
, MASK(mb
, me
));
1830 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1831 gen_set_Rc0(ctx
, t_ra
);
1835 /* rldcl - rldcl. */
1836 static inline void gen_rldcl(DisasContext
*ctx
, int mbn
)
1840 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1841 gen_rldnm(ctx
, mb
, 63);
1843 GEN_PPC64_R2(rldcl
, 0x1E, 0x08);
1845 /* rldcr - rldcr. */
1846 static inline void gen_rldcr(DisasContext
*ctx
, int men
)
1850 me
= MB(ctx
->opcode
) | (men
<< 5);
1851 gen_rldnm(ctx
, 0, me
);
1853 GEN_PPC64_R2(rldcr
, 0x1E, 0x09);
1855 /* rldimi - rldimi. */
1856 static void gen_rldimi(DisasContext
*ctx
, int mbn
, int shn
)
1858 TCGv t_ra
= cpu_gpr
[rA(ctx
->opcode
)];
1859 TCGv t_rs
= cpu_gpr
[rS(ctx
->opcode
)];
1860 uint32_t sh
= SH(ctx
->opcode
) | (shn
<< 5);
1861 uint32_t mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1862 uint32_t me
= 63 - sh
;
1865 tcg_gen_deposit_tl(t_ra
, t_ra
, t_rs
, sh
, me
- mb
+ 1);
1867 target_ulong mask
= MASK(mb
, me
);
1868 TCGv t1
= tcg_temp_new();
1870 tcg_gen_rotli_tl(t1
, t_rs
, sh
);
1871 tcg_gen_andi_tl(t1
, t1
, mask
);
1872 tcg_gen_andi_tl(t_ra
, t_ra
, ~mask
);
1873 tcg_gen_or_tl(t_ra
, t_ra
, t1
);
1876 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1877 gen_set_Rc0(ctx
, t_ra
);
1880 GEN_PPC64_R4(rldimi
, 0x1E, 0x06);
1883 /*** Integer shift ***/
1886 static void gen_slw(DisasContext
*ctx
)
1890 t0
= tcg_temp_new();
1891 /* AND rS with a mask that is 0 when rB >= 0x20 */
1892 #if defined(TARGET_PPC64)
1893 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x3a);
1894 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1896 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x1a);
1897 tcg_gen_sari_tl(t0
, t0
, 0x1f);
1899 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1900 t1
= tcg_temp_new();
1901 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x1f);
1902 tcg_gen_shl_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1905 tcg_gen_ext32u_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)]);
1906 if (unlikely(Rc(ctx
->opcode
) != 0))
1907 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1911 static void gen_sraw(DisasContext
*ctx
)
1913 gen_helper_sraw(cpu_gpr
[rA(ctx
->opcode
)], cpu_env
,
1914 cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1915 if (unlikely(Rc(ctx
->opcode
) != 0))
1916 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1919 /* srawi & srawi. */
1920 static void gen_srawi(DisasContext
*ctx
)
1922 int sh
= SH(ctx
->opcode
);
1923 TCGv dst
= cpu_gpr
[rA(ctx
->opcode
)];
1924 TCGv src
= cpu_gpr
[rS(ctx
->opcode
)];
1926 tcg_gen_ext32s_tl(dst
, src
);
1927 tcg_gen_movi_tl(cpu_ca
, 0);
1930 tcg_gen_ext32s_tl(dst
, src
);
1931 tcg_gen_andi_tl(cpu_ca
, dst
, (1ULL << sh
) - 1);
1932 t0
= tcg_temp_new();
1933 tcg_gen_sari_tl(t0
, dst
, TARGET_LONG_BITS
- 1);
1934 tcg_gen_and_tl(cpu_ca
, cpu_ca
, t0
);
1936 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_ca
, cpu_ca
, 0);
1937 tcg_gen_sari_tl(dst
, dst
, sh
);
1939 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1940 gen_set_Rc0(ctx
, dst
);
1945 static void gen_srw(DisasContext
*ctx
)
1949 t0
= tcg_temp_new();
1950 /* AND rS with a mask that is 0 when rB >= 0x20 */
1951 #if defined(TARGET_PPC64)
1952 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x3a);
1953 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1955 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x1a);
1956 tcg_gen_sari_tl(t0
, t0
, 0x1f);
1958 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1959 tcg_gen_ext32u_tl(t0
, t0
);
1960 t1
= tcg_temp_new();
1961 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x1f);
1962 tcg_gen_shr_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1965 if (unlikely(Rc(ctx
->opcode
) != 0))
1966 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1969 #if defined(TARGET_PPC64)
1971 static void gen_sld(DisasContext
*ctx
)
1975 t0
= tcg_temp_new();
1976 /* AND rS with a mask that is 0 when rB >= 0x40 */
1977 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x39);
1978 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1979 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1980 t1
= tcg_temp_new();
1981 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x3f);
1982 tcg_gen_shl_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1985 if (unlikely(Rc(ctx
->opcode
) != 0))
1986 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1990 static void gen_srad(DisasContext
*ctx
)
1992 gen_helper_srad(cpu_gpr
[rA(ctx
->opcode
)], cpu_env
,
1993 cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1994 if (unlikely(Rc(ctx
->opcode
) != 0))
1995 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1997 /* sradi & sradi. */
1998 static inline void gen_sradi(DisasContext
*ctx
, int n
)
2000 int sh
= SH(ctx
->opcode
) + (n
<< 5);
2001 TCGv dst
= cpu_gpr
[rA(ctx
->opcode
)];
2002 TCGv src
= cpu_gpr
[rS(ctx
->opcode
)];
2004 tcg_gen_mov_tl(dst
, src
);
2005 tcg_gen_movi_tl(cpu_ca
, 0);
2008 tcg_gen_andi_tl(cpu_ca
, src
, (1ULL << sh
) - 1);
2009 t0
= tcg_temp_new();
2010 tcg_gen_sari_tl(t0
, src
, TARGET_LONG_BITS
- 1);
2011 tcg_gen_and_tl(cpu_ca
, cpu_ca
, t0
);
2013 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_ca
, cpu_ca
, 0);
2014 tcg_gen_sari_tl(dst
, src
, sh
);
2016 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2017 gen_set_Rc0(ctx
, dst
);
2021 static void gen_sradi0(DisasContext
*ctx
)
2026 static void gen_sradi1(DisasContext
*ctx
)
2032 static void gen_srd(DisasContext
*ctx
)
2036 t0
= tcg_temp_new();
2037 /* AND rS with a mask that is 0 when rB >= 0x40 */
2038 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x39);
2039 tcg_gen_sari_tl(t0
, t0
, 0x3f);
2040 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
2041 t1
= tcg_temp_new();
2042 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x3f);
2043 tcg_gen_shr_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
2046 if (unlikely(Rc(ctx
->opcode
) != 0))
2047 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
2051 #if defined(TARGET_PPC64)
2052 static void gen_set_cr1_from_fpscr(DisasContext
*ctx
)
2054 TCGv_i32 tmp
= tcg_temp_new_i32();
2055 tcg_gen_trunc_tl_i32(tmp
, cpu_fpscr
);
2056 tcg_gen_shri_i32(cpu_crf
[1], tmp
, 28);
2057 tcg_temp_free_i32(tmp
);
2060 static void gen_set_cr1_from_fpscr(DisasContext
*ctx
)
2062 tcg_gen_shri_tl(cpu_crf
[1], cpu_fpscr
, 28);
2066 /*** Floating-Point arithmetic ***/
2067 #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
2068 static void gen_f##name(DisasContext *ctx) \
2070 if (unlikely(!ctx->fpu_enabled)) { \
2071 gen_exception(ctx, POWERPC_EXCP_FPU); \
2074 /* NIP cannot be restored if the memory exception comes from an helper */ \
2075 gen_update_nip(ctx, ctx->nip - 4); \
2076 gen_reset_fpstatus(); \
2077 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2078 cpu_fpr[rA(ctx->opcode)], \
2079 cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2081 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2082 cpu_fpr[rD(ctx->opcode)]); \
2085 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2087 if (unlikely(Rc(ctx->opcode) != 0)) { \
2088 gen_set_cr1_from_fpscr(ctx); \
2092 #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
2093 _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
2094 _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
2096 #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2097 static void gen_f##name(DisasContext *ctx) \
2099 if (unlikely(!ctx->fpu_enabled)) { \
2100 gen_exception(ctx, POWERPC_EXCP_FPU); \
2103 /* NIP cannot be restored if the memory exception comes from an helper */ \
2104 gen_update_nip(ctx, ctx->nip - 4); \
2105 gen_reset_fpstatus(); \
2106 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2107 cpu_fpr[rA(ctx->opcode)], \
2108 cpu_fpr[rB(ctx->opcode)]); \
2110 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2111 cpu_fpr[rD(ctx->opcode)]); \
2114 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2116 if (unlikely(Rc(ctx->opcode) != 0)) { \
2117 gen_set_cr1_from_fpscr(ctx); \
2120 #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2121 _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2122 _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2124 #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2125 static void gen_f##name(DisasContext *ctx) \
2127 if (unlikely(!ctx->fpu_enabled)) { \
2128 gen_exception(ctx, POWERPC_EXCP_FPU); \
2131 /* NIP cannot be restored if the memory exception comes from an helper */ \
2132 gen_update_nip(ctx, ctx->nip - 4); \
2133 gen_reset_fpstatus(); \
2134 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2135 cpu_fpr[rA(ctx->opcode)], \
2136 cpu_fpr[rC(ctx->opcode)]); \
2138 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2139 cpu_fpr[rD(ctx->opcode)]); \
2142 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2144 if (unlikely(Rc(ctx->opcode) != 0)) { \
2145 gen_set_cr1_from_fpscr(ctx); \
2148 #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2149 _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2150 _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2152 #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2153 static void gen_f##name(DisasContext *ctx) \
2155 if (unlikely(!ctx->fpu_enabled)) { \
2156 gen_exception(ctx, POWERPC_EXCP_FPU); \
2159 /* NIP cannot be restored if the memory exception comes from an helper */ \
2160 gen_update_nip(ctx, ctx->nip - 4); \
2161 gen_reset_fpstatus(); \
2162 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2163 cpu_fpr[rB(ctx->opcode)]); \
2165 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2167 if (unlikely(Rc(ctx->opcode) != 0)) { \
2168 gen_set_cr1_from_fpscr(ctx); \
2172 #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2173 static void gen_f##name(DisasContext *ctx) \
2175 if (unlikely(!ctx->fpu_enabled)) { \
2176 gen_exception(ctx, POWERPC_EXCP_FPU); \
2179 /* NIP cannot be restored if the memory exception comes from an helper */ \
2180 gen_update_nip(ctx, ctx->nip - 4); \
2181 gen_reset_fpstatus(); \
2182 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2183 cpu_fpr[rB(ctx->opcode)]); \
2185 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2187 if (unlikely(Rc(ctx->opcode) != 0)) { \
2188 gen_set_cr1_from_fpscr(ctx); \
2193 GEN_FLOAT_AB(add
, 0x15, 0x000007C0, 1, PPC_FLOAT
);
2195 GEN_FLOAT_AB(div
, 0x12, 0x000007C0, 1, PPC_FLOAT
);
2197 GEN_FLOAT_AC(mul
, 0x19, 0x0000F800, 1, PPC_FLOAT
);
2200 GEN_FLOAT_BS(re
, 0x3F, 0x18, 1, PPC_FLOAT_EXT
);
2203 GEN_FLOAT_BS(res
, 0x3B, 0x18, 1, PPC_FLOAT_FRES
);
2206 GEN_FLOAT_BS(rsqrte
, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE
);
2209 static void gen_frsqrtes(DisasContext
*ctx
)
2211 if (unlikely(!ctx
->fpu_enabled
)) {
2212 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2215 /* NIP cannot be restored if the memory exception comes from an helper */
2216 gen_update_nip(ctx
, ctx
->nip
- 4);
2217 gen_reset_fpstatus();
2218 gen_helper_frsqrte(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2219 cpu_fpr
[rB(ctx
->opcode
)]);
2220 gen_helper_frsp(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2221 cpu_fpr
[rD(ctx
->opcode
)]);
2222 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)]);
2223 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2224 gen_set_cr1_from_fpscr(ctx
);
2229 _GEN_FLOAT_ACB(sel
, sel
, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL
);
2231 GEN_FLOAT_AB(sub
, 0x14, 0x000007C0, 1, PPC_FLOAT
);
2235 static void gen_fsqrt(DisasContext
*ctx
)
2237 if (unlikely(!ctx
->fpu_enabled
)) {
2238 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2241 /* NIP cannot be restored if the memory exception comes from an helper */
2242 gen_update_nip(ctx
, ctx
->nip
- 4);
2243 gen_reset_fpstatus();
2244 gen_helper_fsqrt(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2245 cpu_fpr
[rB(ctx
->opcode
)]);
2246 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)]);
2247 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2248 gen_set_cr1_from_fpscr(ctx
);
2252 static void gen_fsqrts(DisasContext
*ctx
)
2254 if (unlikely(!ctx
->fpu_enabled
)) {
2255 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2258 /* NIP cannot be restored if the memory exception comes from an helper */
2259 gen_update_nip(ctx
, ctx
->nip
- 4);
2260 gen_reset_fpstatus();
2261 gen_helper_fsqrt(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2262 cpu_fpr
[rB(ctx
->opcode
)]);
2263 gen_helper_frsp(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2264 cpu_fpr
[rD(ctx
->opcode
)]);
2265 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)]);
2266 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2267 gen_set_cr1_from_fpscr(ctx
);
2271 /*** Floating-Point multiply-and-add ***/
2272 /* fmadd - fmadds */
2273 GEN_FLOAT_ACB(madd
, 0x1D, 1, PPC_FLOAT
);
2274 /* fmsub - fmsubs */
2275 GEN_FLOAT_ACB(msub
, 0x1C, 1, PPC_FLOAT
);
2276 /* fnmadd - fnmadds */
2277 GEN_FLOAT_ACB(nmadd
, 0x1F, 1, PPC_FLOAT
);
2278 /* fnmsub - fnmsubs */
2279 GEN_FLOAT_ACB(nmsub
, 0x1E, 1, PPC_FLOAT
);
2281 /*** Floating-Point round & convert ***/
2283 GEN_FLOAT_B(ctiw
, 0x0E, 0x00, 0, PPC_FLOAT
);
2285 GEN_FLOAT_B(ctiwu
, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206
);
2287 GEN_FLOAT_B(ctiwz
, 0x0F, 0x00, 0, PPC_FLOAT
);
2289 GEN_FLOAT_B(ctiwuz
, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206
);
2291 GEN_FLOAT_B(rsp
, 0x0C, 0x00, 1, PPC_FLOAT
);
2293 GEN_FLOAT_B(cfid
, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64
);
2295 GEN_FLOAT_B(cfids
, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206
);
2297 GEN_FLOAT_B(cfidu
, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206
);
2299 GEN_FLOAT_B(cfidus
, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206
);
2301 GEN_FLOAT_B(ctid
, 0x0E, 0x19, 0, PPC2_FP_CVT_S64
);
2303 GEN_FLOAT_B(ctidu
, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206
);
2305 GEN_FLOAT_B(ctidz
, 0x0F, 0x19, 0, PPC2_FP_CVT_S64
);
2307 GEN_FLOAT_B(ctiduz
, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206
);
2310 GEN_FLOAT_B(rin
, 0x08, 0x0C, 1, PPC_FLOAT_EXT
);
2312 GEN_FLOAT_B(riz
, 0x08, 0x0D, 1, PPC_FLOAT_EXT
);
2314 GEN_FLOAT_B(rip
, 0x08, 0x0E, 1, PPC_FLOAT_EXT
);
2316 GEN_FLOAT_B(rim
, 0x08, 0x0F, 1, PPC_FLOAT_EXT
);
2318 static void gen_ftdiv(DisasContext
*ctx
)
2320 if (unlikely(!ctx
->fpu_enabled
)) {
2321 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2324 gen_helper_ftdiv(cpu_crf
[crfD(ctx
->opcode
)], cpu_fpr
[rA(ctx
->opcode
)],
2325 cpu_fpr
[rB(ctx
->opcode
)]);
2328 static void gen_ftsqrt(DisasContext
*ctx
)
2330 if (unlikely(!ctx
->fpu_enabled
)) {
2331 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2334 gen_helper_ftsqrt(cpu_crf
[crfD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)]);
2339 /*** Floating-Point compare ***/
2342 static void gen_fcmpo(DisasContext
*ctx
)
2345 if (unlikely(!ctx
->fpu_enabled
)) {
2346 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2349 /* NIP cannot be restored if the memory exception comes from an helper */
2350 gen_update_nip(ctx
, ctx
->nip
- 4);
2351 gen_reset_fpstatus();
2352 crf
= tcg_const_i32(crfD(ctx
->opcode
));
2353 gen_helper_fcmpo(cpu_env
, cpu_fpr
[rA(ctx
->opcode
)],
2354 cpu_fpr
[rB(ctx
->opcode
)], crf
);
2355 tcg_temp_free_i32(crf
);
2356 gen_helper_float_check_status(cpu_env
);
2360 static void gen_fcmpu(DisasContext
*ctx
)
2363 if (unlikely(!ctx
->fpu_enabled
)) {
2364 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2367 /* NIP cannot be restored if the memory exception comes from an helper */
2368 gen_update_nip(ctx
, ctx
->nip
- 4);
2369 gen_reset_fpstatus();
2370 crf
= tcg_const_i32(crfD(ctx
->opcode
));
2371 gen_helper_fcmpu(cpu_env
, cpu_fpr
[rA(ctx
->opcode
)],
2372 cpu_fpr
[rB(ctx
->opcode
)], crf
);
2373 tcg_temp_free_i32(crf
);
2374 gen_helper_float_check_status(cpu_env
);
2377 /*** Floating-point move ***/
2379 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
2380 static void gen_fabs(DisasContext
*ctx
)
2382 if (unlikely(!ctx
->fpu_enabled
)) {
2383 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2386 tcg_gen_andi_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)],
2388 if (unlikely(Rc(ctx
->opcode
))) {
2389 gen_set_cr1_from_fpscr(ctx
);
2394 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
2395 static void gen_fmr(DisasContext
*ctx
)
2397 if (unlikely(!ctx
->fpu_enabled
)) {
2398 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2401 tcg_gen_mov_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)]);
2402 if (unlikely(Rc(ctx
->opcode
))) {
2403 gen_set_cr1_from_fpscr(ctx
);
2408 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
2409 static void gen_fnabs(DisasContext
*ctx
)
2411 if (unlikely(!ctx
->fpu_enabled
)) {
2412 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2415 tcg_gen_ori_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)],
2417 if (unlikely(Rc(ctx
->opcode
))) {
2418 gen_set_cr1_from_fpscr(ctx
);
2423 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
2424 static void gen_fneg(DisasContext
*ctx
)
2426 if (unlikely(!ctx
->fpu_enabled
)) {
2427 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2430 tcg_gen_xori_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)],
2432 if (unlikely(Rc(ctx
->opcode
))) {
2433 gen_set_cr1_from_fpscr(ctx
);
2437 /* fcpsgn: PowerPC 2.05 specification */
2438 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
2439 static void gen_fcpsgn(DisasContext
*ctx
)
2441 if (unlikely(!ctx
->fpu_enabled
)) {
2442 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2445 tcg_gen_deposit_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rA(ctx
->opcode
)],
2446 cpu_fpr
[rB(ctx
->opcode
)], 0, 63);
2447 if (unlikely(Rc(ctx
->opcode
))) {
2448 gen_set_cr1_from_fpscr(ctx
);
2452 static void gen_fmrgew(DisasContext
*ctx
)
2455 if (unlikely(!ctx
->fpu_enabled
)) {
2456 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2459 b0
= tcg_temp_new_i64();
2460 tcg_gen_shri_i64(b0
, cpu_fpr
[rB(ctx
->opcode
)], 32);
2461 tcg_gen_deposit_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rA(ctx
->opcode
)],
2463 tcg_temp_free_i64(b0
);
2466 static void gen_fmrgow(DisasContext
*ctx
)
2468 if (unlikely(!ctx
->fpu_enabled
)) {
2469 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2472 tcg_gen_deposit_i64(cpu_fpr
[rD(ctx
->opcode
)],
2473 cpu_fpr
[rB(ctx
->opcode
)],
2474 cpu_fpr
[rA(ctx
->opcode
)],
2478 /*** Floating-Point status & ctrl register ***/
2481 static void gen_mcrfs(DisasContext
*ctx
)
2483 TCGv tmp
= tcg_temp_new();
2485 TCGv_i64 tnew_fpscr
= tcg_temp_new_i64();
2490 if (unlikely(!ctx
->fpu_enabled
)) {
2491 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2494 bfa
= crfS(ctx
->opcode
);
2497 tcg_gen_shri_tl(tmp
, cpu_fpscr
, shift
);
2498 tcg_gen_trunc_tl_i32(cpu_crf
[crfD(ctx
->opcode
)], tmp
);
2499 tcg_gen_andi_i32(cpu_crf
[crfD(ctx
->opcode
)], cpu_crf
[crfD(ctx
->opcode
)], 0xf);
2501 tcg_gen_extu_tl_i64(tnew_fpscr
, cpu_fpscr
);
2502 /* Only the exception bits (including FX) should be cleared if read */
2503 tcg_gen_andi_i64(tnew_fpscr
, tnew_fpscr
, ~((0xF << shift
) & FP_EX_CLEAR_BITS
));
2504 /* FEX and VX need to be updated, so don't set fpscr directly */
2505 tmask
= tcg_const_i32(1 << nibble
);
2506 gen_helper_store_fpscr(cpu_env
, tnew_fpscr
, tmask
);
2507 tcg_temp_free_i32(tmask
);
2508 tcg_temp_free_i64(tnew_fpscr
);
2512 static void gen_mffs(DisasContext
*ctx
)
2514 if (unlikely(!ctx
->fpu_enabled
)) {
2515 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2518 gen_reset_fpstatus();
2519 tcg_gen_extu_tl_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpscr
);
2520 if (unlikely(Rc(ctx
->opcode
))) {
2521 gen_set_cr1_from_fpscr(ctx
);
2526 static void gen_mtfsb0(DisasContext
*ctx
)
2530 if (unlikely(!ctx
->fpu_enabled
)) {
2531 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2534 crb
= 31 - crbD(ctx
->opcode
);
2535 gen_reset_fpstatus();
2536 if (likely(crb
!= FPSCR_FEX
&& crb
!= FPSCR_VX
)) {
2538 /* NIP cannot be restored if the memory exception comes from an helper */
2539 gen_update_nip(ctx
, ctx
->nip
- 4);
2540 t0
= tcg_const_i32(crb
);
2541 gen_helper_fpscr_clrbit(cpu_env
, t0
);
2542 tcg_temp_free_i32(t0
);
2544 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2545 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2546 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2551 static void gen_mtfsb1(DisasContext
*ctx
)
2555 if (unlikely(!ctx
->fpu_enabled
)) {
2556 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2559 crb
= 31 - crbD(ctx
->opcode
);
2560 gen_reset_fpstatus();
2561 /* XXX: we pretend we can only do IEEE floating-point computations */
2562 if (likely(crb
!= FPSCR_FEX
&& crb
!= FPSCR_VX
&& crb
!= FPSCR_NI
)) {
2564 /* NIP cannot be restored if the memory exception comes from an helper */
2565 gen_update_nip(ctx
, ctx
->nip
- 4);
2566 t0
= tcg_const_i32(crb
);
2567 gen_helper_fpscr_setbit(cpu_env
, t0
);
2568 tcg_temp_free_i32(t0
);
2570 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2571 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2572 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2574 /* We can raise a differed exception */
2575 gen_helper_float_check_status(cpu_env
);
2579 static void gen_mtfsf(DisasContext
*ctx
)
2584 if (unlikely(!ctx
->fpu_enabled
)) {
2585 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2588 flm
= FPFLM(ctx
->opcode
);
2589 l
= FPL(ctx
->opcode
);
2590 w
= FPW(ctx
->opcode
);
2591 if (unlikely(w
& !(ctx
->insns_flags2
& PPC2_ISA205
))) {
2592 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2595 /* NIP cannot be restored if the memory exception comes from an helper */
2596 gen_update_nip(ctx
, ctx
->nip
- 4);
2597 gen_reset_fpstatus();
2599 t0
= tcg_const_i32((ctx
->insns_flags2
& PPC2_ISA205
) ?
0xffff : 0xff);
2601 t0
= tcg_const_i32(flm
<< (w
* 8));
2603 gen_helper_store_fpscr(cpu_env
, cpu_fpr
[rB(ctx
->opcode
)], t0
);
2604 tcg_temp_free_i32(t0
);
2605 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2606 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2607 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2609 /* We can raise a differed exception */
2610 gen_helper_float_check_status(cpu_env
);
2614 static void gen_mtfsfi(DisasContext
*ctx
)
2620 if (unlikely(!ctx
->fpu_enabled
)) {
2621 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2624 w
= FPW(ctx
->opcode
);
2625 bf
= FPBF(ctx
->opcode
);
2626 if (unlikely(w
& !(ctx
->insns_flags2
& PPC2_ISA205
))) {
2627 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2630 sh
= (8 * w
) + 7 - bf
;
2631 /* NIP cannot be restored if the memory exception comes from an helper */
2632 gen_update_nip(ctx
, ctx
->nip
- 4);
2633 gen_reset_fpstatus();
2634 t0
= tcg_const_i64(((uint64_t)FPIMM(ctx
->opcode
)) << (4 * sh
));
2635 t1
= tcg_const_i32(1 << sh
);
2636 gen_helper_store_fpscr(cpu_env
, t0
, t1
);
2637 tcg_temp_free_i64(t0
);
2638 tcg_temp_free_i32(t1
);
2639 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2640 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2641 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2643 /* We can raise a differed exception */
2644 gen_helper_float_check_status(cpu_env
);
2647 /*** Addressing modes ***/
2648 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2649 static inline void gen_addr_imm_index(DisasContext
*ctx
, TCGv EA
,
2652 target_long simm
= SIMM(ctx
->opcode
);
2655 if (rA(ctx
->opcode
) == 0) {
2656 if (NARROW_MODE(ctx
)) {
2657 simm
= (uint32_t)simm
;
2659 tcg_gen_movi_tl(EA
, simm
);
2660 } else if (likely(simm
!= 0)) {
2661 tcg_gen_addi_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)], simm
);
2662 if (NARROW_MODE(ctx
)) {
2663 tcg_gen_ext32u_tl(EA
, EA
);
2666 if (NARROW_MODE(ctx
)) {
2667 tcg_gen_ext32u_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2669 tcg_gen_mov_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2674 static inline void gen_addr_reg_index(DisasContext
*ctx
, TCGv EA
)
2676 if (rA(ctx
->opcode
) == 0) {
2677 if (NARROW_MODE(ctx
)) {
2678 tcg_gen_ext32u_tl(EA
, cpu_gpr
[rB(ctx
->opcode
)]);
2680 tcg_gen_mov_tl(EA
, cpu_gpr
[rB(ctx
->opcode
)]);
2683 tcg_gen_add_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
2684 if (NARROW_MODE(ctx
)) {
2685 tcg_gen_ext32u_tl(EA
, EA
);
2690 static inline void gen_addr_register(DisasContext
*ctx
, TCGv EA
)
2692 if (rA(ctx
->opcode
) == 0) {
2693 tcg_gen_movi_tl(EA
, 0);
2694 } else if (NARROW_MODE(ctx
)) {
2695 tcg_gen_ext32u_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2697 tcg_gen_mov_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2701 static inline void gen_addr_add(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
2704 tcg_gen_addi_tl(ret
, arg1
, val
);
2705 if (NARROW_MODE(ctx
)) {
2706 tcg_gen_ext32u_tl(ret
, ret
);
2710 static inline void gen_check_align(DisasContext
*ctx
, TCGv EA
, int mask
)
2712 TCGLabel
*l1
= gen_new_label();
2713 TCGv t0
= tcg_temp_new();
2715 /* NIP cannot be restored if the memory exception comes from an helper */
2716 gen_update_nip(ctx
, ctx
->nip
- 4);
2717 tcg_gen_andi_tl(t0
, EA
, mask
);
2718 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, l1
);
2719 t1
= tcg_const_i32(POWERPC_EXCP_ALIGN
);
2720 t2
= tcg_const_i32(0);
2721 gen_helper_raise_exception_err(cpu_env
, t1
, t2
);
2722 tcg_temp_free_i32(t1
);
2723 tcg_temp_free_i32(t2
);
2728 /*** Integer load ***/
2729 static inline void gen_qemu_ld8u(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2731 tcg_gen_qemu_ld8u(arg1
, arg2
, ctx
->mem_idx
);
2734 static inline void gen_qemu_ld16u(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2736 TCGMemOp op
= MO_UW
| ctx
->default_tcg_memop_mask
;
2737 tcg_gen_qemu_ld_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
2740 static inline void gen_qemu_ld16s(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2742 TCGMemOp op
= MO_SW
| ctx
->default_tcg_memop_mask
;
2743 tcg_gen_qemu_ld_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
2746 static inline void gen_qemu_ld32u(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2748 TCGMemOp op
= MO_UL
| ctx
->default_tcg_memop_mask
;
2749 tcg_gen_qemu_ld_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
2752 static void gen_qemu_ld32u_i64(DisasContext
*ctx
, TCGv_i64 val
, TCGv addr
)
2754 TCGv tmp
= tcg_temp_new();
2755 gen_qemu_ld32u(ctx
, tmp
, addr
);
2756 tcg_gen_extu_tl_i64(val
, tmp
);
2760 static inline void gen_qemu_ld32s(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2762 TCGMemOp op
= MO_SL
| ctx
->default_tcg_memop_mask
;
2763 tcg_gen_qemu_ld_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
2766 static void gen_qemu_ld32s_i64(DisasContext
*ctx
, TCGv_i64 val
, TCGv addr
)
2768 TCGv tmp
= tcg_temp_new();
2769 gen_qemu_ld32s(ctx
, tmp
, addr
);
2770 tcg_gen_ext_tl_i64(val
, tmp
);
2774 static inline void gen_qemu_ld64(DisasContext
*ctx
, TCGv_i64 arg1
, TCGv arg2
)
2776 TCGMemOp op
= MO_Q
| ctx
->default_tcg_memop_mask
;
2777 tcg_gen_qemu_ld_i64(arg1
, arg2
, ctx
->mem_idx
, op
);
2780 static inline void gen_qemu_st8(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2782 tcg_gen_qemu_st8(arg1
, arg2
, ctx
->mem_idx
);
2785 static inline void gen_qemu_st16(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2787 TCGMemOp op
= MO_UW
| ctx
->default_tcg_memop_mask
;
2788 tcg_gen_qemu_st_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
2791 static inline void gen_qemu_st32(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2793 TCGMemOp op
= MO_UL
| ctx
->default_tcg_memop_mask
;
2794 tcg_gen_qemu_st_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
2797 static void gen_qemu_st32_i64(DisasContext
*ctx
, TCGv_i64 val
, TCGv addr
)
2799 TCGv tmp
= tcg_temp_new();
2800 tcg_gen_trunc_i64_tl(tmp
, val
);
2801 gen_qemu_st32(ctx
, tmp
, addr
);
2805 static inline void gen_qemu_st64(DisasContext
*ctx
, TCGv_i64 arg1
, TCGv arg2
)
2807 TCGMemOp op
= MO_Q
| ctx
->default_tcg_memop_mask
;
2808 tcg_gen_qemu_st_i64(arg1
, arg2
, ctx
->mem_idx
, op
);
/* Integer load generator macros.
 * GEN_LD    : D-form load            (lXz)
 * GEN_LDU   : D-form load w/ update  (lXzu)  — rA!=0 and rA!=rD required
 * GEN_LDUX  : X-form load w/ update  (lXzux) — same operand constraints
 * GEN_LDX_E : X-form load            (lXzx), with extra type2 flag
 * GEN_LDS   : expands all four forms for one width.
 * NOTE(review): braces, 'else' and 'return' continuation lines were dropped
 * by the extraction and restored — verify against upstream. */
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)

#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2878 /* lbz lbzu lbzux lbzx */
2879 GEN_LDS(lbz
, ld8u
, 0x02, PPC_INTEGER
);
2880 /* lha lhau lhaux lhax */
2881 GEN_LDS(lha
, ld16s
, 0x0A, PPC_INTEGER
);
2882 /* lhz lhzu lhzux lhzx */
2883 GEN_LDS(lhz
, ld16u
, 0x08, PPC_INTEGER
);
2884 /* lwz lwzu lwzux lwzx */
2885 GEN_LDS(lwz
, ld32u
, 0x00, PPC_INTEGER
);
2886 #if defined(TARGET_PPC64)
2888 GEN_LDUX(lwa
, ld32s
, 0x15, 0x0B, PPC_64B
);
2890 GEN_LDX(lwa
, ld32s
, 0x15, 0x0A, PPC_64B
);
2892 GEN_LDUX(ld
, ld64
, 0x15, 0x01, PPC_64B
);
2894 GEN_LDX(ld
, ld64
, 0x15, 0x00, PPC_64B
);
2896 static void gen_ld(DisasContext
*ctx
)
2899 if (Rc(ctx
->opcode
)) {
2900 if (unlikely(rA(ctx
->opcode
) == 0 ||
2901 rA(ctx
->opcode
) == rD(ctx
->opcode
))) {
2902 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2906 gen_set_access_type(ctx
, ACCESS_INT
);
2907 EA
= tcg_temp_new();
2908 gen_addr_imm_index(ctx
, EA
, 0x03);
2909 if (ctx
->opcode
& 0x02) {
2910 /* lwa (lwau is undefined) */
2911 gen_qemu_ld32s(ctx
, cpu_gpr
[rD(ctx
->opcode
)], EA
);
2914 gen_qemu_ld64(ctx
, cpu_gpr
[rD(ctx
->opcode
)], EA
);
2916 if (Rc(ctx
->opcode
))
2917 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], EA
);
2922 static void gen_lq(DisasContext
*ctx
)
2927 /* lq is a legal user mode instruction starting in ISA 2.07 */
2928 bool legal_in_user_mode
= (ctx
->insns_flags2
& PPC2_LSQ_ISA207
) != 0;
2929 bool le_is_supported
= (ctx
->insns_flags2
& PPC2_LSQ_ISA207
) != 0;
2931 if (!legal_in_user_mode
&& ctx
->pr
) {
2932 gen_inval_exception(ctx
, POWERPC_EXCP_PRIV_OPC
);
2936 if (!le_is_supported
&& ctx
->le_mode
) {
2937 gen_exception_err(ctx
, POWERPC_EXCP_ALIGN
, POWERPC_EXCP_ALIGN_LE
);
2941 ra
= rA(ctx
->opcode
);
2942 rd
= rD(ctx
->opcode
);
2943 if (unlikely((rd
& 1) || rd
== ra
)) {
2944 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2948 gen_set_access_type(ctx
, ACCESS_INT
);
2949 EA
= tcg_temp_new();
2950 gen_addr_imm_index(ctx
, EA
, 0x0F);
2952 /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
2953 64-bit byteswap already. */
2954 if (unlikely(ctx
->le_mode
)) {
2955 gen_qemu_ld64(ctx
, cpu_gpr
[rd
+1], EA
);
2956 gen_addr_add(ctx
, EA
, EA
, 8);
2957 gen_qemu_ld64(ctx
, cpu_gpr
[rd
], EA
);
2959 gen_qemu_ld64(ctx
, cpu_gpr
[rd
], EA
);
2960 gen_addr_add(ctx
, EA
, EA
, 8);
2961 gen_qemu_ld64(ctx
, cpu_gpr
[rd
+1], EA
);
2967 /*** Integer store ***/
/* Integer store generator macros, mirroring the load macros above.
 * GEN_STU deliberately pastes 'stop##u' (e.g. gen_st8u), not 'name##u';
 * the opcode table elsewhere in this file must reference the same name —
 * do not "fix" without checking it.
 * NOTE(review): braces, 'else' and 'return' continuation lines were dropped
 * by the extraction and restored — verify against upstream. */
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX_E(name, stop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)

#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
3033 /* stb stbu stbux stbx */
3034 GEN_STS(stb
, st8
, 0x06, PPC_INTEGER
);
3035 /* sth sthu sthux sthx */
3036 GEN_STS(sth
, st16
, 0x0C, PPC_INTEGER
);
3037 /* stw stwu stwux stwx */
3038 GEN_STS(stw
, st32
, 0x04, PPC_INTEGER
);
3039 #if defined(TARGET_PPC64)
3040 GEN_STUX(std
, st64
, 0x15, 0x05, PPC_64B
);
3041 GEN_STX(std
, st64
, 0x15, 0x04, PPC_64B
);
3043 static void gen_std(DisasContext
*ctx
)
3048 rs
= rS(ctx
->opcode
);
3049 if ((ctx
->opcode
& 0x3) == 0x2) { /* stq */
3050 bool legal_in_user_mode
= (ctx
->insns_flags2
& PPC2_LSQ_ISA207
) != 0;
3051 bool le_is_supported
= (ctx
->insns_flags2
& PPC2_LSQ_ISA207
) != 0;
3053 if (!(ctx
->insns_flags
& PPC_64BX
)) {
3054 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
3057 if (!legal_in_user_mode
&& ctx
->pr
) {
3058 gen_inval_exception(ctx
, POWERPC_EXCP_PRIV_OPC
);
3062 if (!le_is_supported
&& ctx
->le_mode
) {
3063 gen_exception_err(ctx
, POWERPC_EXCP_ALIGN
, POWERPC_EXCP_ALIGN_LE
);
3067 if (unlikely(rs
& 1)) {
3068 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
3071 gen_set_access_type(ctx
, ACCESS_INT
);
3072 EA
= tcg_temp_new();
3073 gen_addr_imm_index(ctx
, EA
, 0x03);
3075 /* We only need to swap high and low halves. gen_qemu_st64 does
3076 necessary 64-bit byteswap already. */
3077 if (unlikely(ctx
->le_mode
)) {
3078 gen_qemu_st64(ctx
, cpu_gpr
[rs
+1], EA
);
3079 gen_addr_add(ctx
, EA
, EA
, 8);
3080 gen_qemu_st64(ctx
, cpu_gpr
[rs
], EA
);
3082 gen_qemu_st64(ctx
, cpu_gpr
[rs
], EA
);
3083 gen_addr_add(ctx
, EA
, EA
, 8);
3084 gen_qemu_st64(ctx
, cpu_gpr
[rs
+1], EA
);
3089 if (Rc(ctx
->opcode
)) {
3090 if (unlikely(rA(ctx
->opcode
) == 0)) {
3091 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
3095 gen_set_access_type(ctx
, ACCESS_INT
);
3096 EA
= tcg_temp_new();
3097 gen_addr_imm_index(ctx
, EA
, 0x03);
3098 gen_qemu_st64(ctx
, cpu_gpr
[rs
], EA
);
3099 if (Rc(ctx
->opcode
))
3100 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], EA
);
3105 /*** Integer load and store with byte reverse ***/
3108 static inline void gen_qemu_ld16ur(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
3110 TCGMemOp op
= MO_UW
| (ctx
->default_tcg_memop_mask
^ MO_BSWAP
);
3111 tcg_gen_qemu_ld_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
3113 GEN_LDX(lhbr
, ld16ur
, 0x16, 0x18, PPC_INTEGER
);
3116 static inline void gen_qemu_ld32ur(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
3118 TCGMemOp op
= MO_UL
| (ctx
->default_tcg_memop_mask
^ MO_BSWAP
);
3119 tcg_gen_qemu_ld_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
3121 GEN_LDX(lwbr
, ld32ur
, 0x16, 0x10, PPC_INTEGER
);
#if defined(TARGET_PPC64)
/* Byte-reversed doubleword load (ldbrx, ISA 2.06 DBRX category). */
static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}

/* ldbrx */
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
#endif  /* TARGET_PPC64 */
3134 static inline void gen_qemu_st16r(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
3136 TCGMemOp op
= MO_UW
| (ctx
->default_tcg_memop_mask
^ MO_BSWAP
);
3137 tcg_gen_qemu_st_tl(arg1
, arg2
, ctx
->mem_idx
, op
);
3139 GEN_STX(sthbr
, st16r