2 * PowerPC emulation for qemu: main translation routines.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
30 #define CPU_SINGLE_STEP 0x1
31 #define CPU_BRANCH_STEP 0x2
32 #define GDBSTUB_SINGLE_STEP 0x4
34 /* Include definitions for instructions classes and implementations flags */
35 //#define PPC_DEBUG_DISAS
36 //#define DO_PPC_STATISTICS
38 #ifdef PPC_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41 # define LOG_DISAS(...) do { } while (0)
43 /*****************************************************************************/
44 /* Code translation helpers */
46 /* global register indexes */
47 static TCGv_ptr cpu_env
;
48 static char cpu_reg_names
[10*3 + 22*4 /* GPR */
49 #if !defined(TARGET_PPC64)
50 + 10*4 + 22*5 /* SPE GPRh */
52 + 10*4 + 22*5 /* FPR */
53 + 2*(10*6 + 22*7) /* AVRh, AVRl */
55 static TCGv cpu_gpr
[32];
56 #if !defined(TARGET_PPC64)
57 static TCGv cpu_gprh
[32];
59 static TCGv_i64 cpu_fpr
[32];
60 static TCGv_i64 cpu_avrh
[32], cpu_avrl
[32];
61 static TCGv_i32 cpu_crf
[8];
66 #if defined(TARGET_PPC64)
69 static TCGv cpu_xer
, cpu_so
, cpu_ov
, cpu_ca
;
70 static TCGv cpu_reserve
;
71 static TCGv cpu_fpscr
;
72 static TCGv_i32 cpu_access_type
;
74 #include "exec/gen-icount.h"
76 void ppc_translate_init(void)
80 size_t cpu_reg_names_size
;
81 static int done_init
= 0;
86 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
89 cpu_reg_names_size
= sizeof(cpu_reg_names
);
91 for (i
= 0; i
< 8; i
++) {
92 snprintf(p
, cpu_reg_names_size
, "crf%d", i
);
93 cpu_crf
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
94 offsetof(CPUPPCState
, crf
[i
]), p
);
96 cpu_reg_names_size
-= 5;
99 for (i
= 0; i
< 32; i
++) {
100 snprintf(p
, cpu_reg_names_size
, "r%d", i
);
101 cpu_gpr
[i
] = tcg_global_mem_new(TCG_AREG0
,
102 offsetof(CPUPPCState
, gpr
[i
]), p
);
103 p
+= (i
< 10) ?
3 : 4;
104 cpu_reg_names_size
-= (i
< 10) ?
3 : 4;
105 #if !defined(TARGET_PPC64)
106 snprintf(p
, cpu_reg_names_size
, "r%dH", i
);
107 cpu_gprh
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
108 offsetof(CPUPPCState
, gprh
[i
]), p
);
109 p
+= (i
< 10) ?
4 : 5;
110 cpu_reg_names_size
-= (i
< 10) ?
4 : 5;
113 snprintf(p
, cpu_reg_names_size
, "fp%d", i
);
114 cpu_fpr
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
115 offsetof(CPUPPCState
, fpr
[i
]), p
);
116 p
+= (i
< 10) ?
4 : 5;
117 cpu_reg_names_size
-= (i
< 10) ?
4 : 5;
119 snprintf(p
, cpu_reg_names_size
, "avr%dH", i
);
120 #ifdef HOST_WORDS_BIGENDIAN
121 cpu_avrh
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
122 offsetof(CPUPPCState
, avr
[i
].u64
[0]), p
);
124 cpu_avrh
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
125 offsetof(CPUPPCState
, avr
[i
].u64
[1]), p
);
127 p
+= (i
< 10) ?
6 : 7;
128 cpu_reg_names_size
-= (i
< 10) ?
6 : 7;
130 snprintf(p
, cpu_reg_names_size
, "avr%dL", i
);
131 #ifdef HOST_WORDS_BIGENDIAN
132 cpu_avrl
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
133 offsetof(CPUPPCState
, avr
[i
].u64
[1]), p
);
135 cpu_avrl
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
136 offsetof(CPUPPCState
, avr
[i
].u64
[0]), p
);
138 p
+= (i
< 10) ?
6 : 7;
139 cpu_reg_names_size
-= (i
< 10) ?
6 : 7;
142 cpu_nip
= tcg_global_mem_new(TCG_AREG0
,
143 offsetof(CPUPPCState
, nip
), "nip");
145 cpu_msr
= tcg_global_mem_new(TCG_AREG0
,
146 offsetof(CPUPPCState
, msr
), "msr");
148 cpu_ctr
= tcg_global_mem_new(TCG_AREG0
,
149 offsetof(CPUPPCState
, ctr
), "ctr");
151 cpu_lr
= tcg_global_mem_new(TCG_AREG0
,
152 offsetof(CPUPPCState
, lr
), "lr");
154 #if defined(TARGET_PPC64)
155 cpu_cfar
= tcg_global_mem_new(TCG_AREG0
,
156 offsetof(CPUPPCState
, cfar
), "cfar");
159 cpu_xer
= tcg_global_mem_new(TCG_AREG0
,
160 offsetof(CPUPPCState
, xer
), "xer");
161 cpu_so
= tcg_global_mem_new(TCG_AREG0
,
162 offsetof(CPUPPCState
, so
), "SO");
163 cpu_ov
= tcg_global_mem_new(TCG_AREG0
,
164 offsetof(CPUPPCState
, ov
), "OV");
165 cpu_ca
= tcg_global_mem_new(TCG_AREG0
,
166 offsetof(CPUPPCState
, ca
), "CA");
168 cpu_reserve
= tcg_global_mem_new(TCG_AREG0
,
169 offsetof(CPUPPCState
, reserve_addr
),
172 cpu_fpscr
= tcg_global_mem_new(TCG_AREG0
,
173 offsetof(CPUPPCState
, fpscr
), "fpscr");
175 cpu_access_type
= tcg_global_mem_new_i32(TCG_AREG0
,
176 offsetof(CPUPPCState
, access_type
), "access_type");
178 /* register helpers */
185 /* internal defines */
186 typedef struct DisasContext
{
187 struct TranslationBlock
*tb
;
191 /* Routine used to access memory */
194 /* Translation flags */
196 #if defined(TARGET_PPC64)
203 ppc_spr_t
*spr_cb
; /* Needed to check rights for mfspr/mtspr */
204 int singlestep_enabled
;
205 uint64_t insns_flags
;
206 uint64_t insns_flags2
;
209 /* True when active word size < size of target_long. */
211 # define NARROW_MODE(C) (!(C)->sf_mode)
213 # define NARROW_MODE(C) 0
216 struct opc_handler_t
{
217 /* invalid bits for instruction 1 (Rc(opcode) == 0) */
219 /* invalid bits for instruction 2 (Rc(opcode) == 1) */
221 /* instruction type */
223 /* extended instruction type */
226 void (*handler
)(DisasContext
*ctx
);
227 #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
230 #if defined(DO_PPC_STATISTICS)
235 static inline void gen_reset_fpstatus(void)
237 gen_helper_reset_fpstatus(cpu_env
);
240 static inline void gen_compute_fprf(TCGv_i64 arg
, int set_fprf
, int set_rc
)
242 TCGv_i32 t0
= tcg_temp_new_i32();
245 /* This case might be optimized later */
246 tcg_gen_movi_i32(t0
, 1);
247 gen_helper_compute_fprf(t0
, cpu_env
, arg
, t0
);
248 if (unlikely(set_rc
)) {
249 tcg_gen_mov_i32(cpu_crf
[1], t0
);
251 gen_helper_float_check_status(cpu_env
);
252 } else if (unlikely(set_rc
)) {
253 /* We always need to compute fpcc */
254 tcg_gen_movi_i32(t0
, 0);
255 gen_helper_compute_fprf(t0
, cpu_env
, arg
, t0
);
256 tcg_gen_mov_i32(cpu_crf
[1], t0
);
259 tcg_temp_free_i32(t0
);
262 static inline void gen_set_access_type(DisasContext
*ctx
, int access_type
)
264 if (ctx
->access_type
!= access_type
) {
265 tcg_gen_movi_i32(cpu_access_type
, access_type
);
266 ctx
->access_type
= access_type
;
270 static inline void gen_update_nip(DisasContext
*ctx
, target_ulong nip
)
272 if (NARROW_MODE(ctx
)) {
275 tcg_gen_movi_tl(cpu_nip
, nip
);
278 static inline void gen_exception_err(DisasContext
*ctx
, uint32_t excp
, uint32_t error
)
281 if (ctx
->exception
== POWERPC_EXCP_NONE
) {
282 gen_update_nip(ctx
, ctx
->nip
);
284 t0
= tcg_const_i32(excp
);
285 t1
= tcg_const_i32(error
);
286 gen_helper_raise_exception_err(cpu_env
, t0
, t1
);
287 tcg_temp_free_i32(t0
);
288 tcg_temp_free_i32(t1
);
289 ctx
->exception
= (excp
);
292 static inline void gen_exception(DisasContext
*ctx
, uint32_t excp
)
295 if (ctx
->exception
== POWERPC_EXCP_NONE
) {
296 gen_update_nip(ctx
, ctx
->nip
);
298 t0
= tcg_const_i32(excp
);
299 gen_helper_raise_exception(cpu_env
, t0
);
300 tcg_temp_free_i32(t0
);
301 ctx
->exception
= (excp
);
304 static inline void gen_debug_exception(DisasContext
*ctx
)
308 if ((ctx
->exception
!= POWERPC_EXCP_BRANCH
) &&
309 (ctx
->exception
!= POWERPC_EXCP_SYNC
)) {
310 gen_update_nip(ctx
, ctx
->nip
);
312 t0
= tcg_const_i32(EXCP_DEBUG
);
313 gen_helper_raise_exception(cpu_env
, t0
);
314 tcg_temp_free_i32(t0
);
317 static inline void gen_inval_exception(DisasContext
*ctx
, uint32_t error
)
319 gen_exception_err(ctx
, POWERPC_EXCP_PROGRAM
, POWERPC_EXCP_INVAL
| error
);
322 /* Stop translation */
323 static inline void gen_stop_exception(DisasContext
*ctx
)
325 gen_update_nip(ctx
, ctx
->nip
);
326 ctx
->exception
= POWERPC_EXCP_STOP
;
329 /* No need to update nip here, as execution flow will change */
330 static inline void gen_sync_exception(DisasContext
*ctx
)
332 ctx
->exception
= POWERPC_EXCP_SYNC
;
335 #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
336 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)
338 #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
339 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)
341 #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
342 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
344 #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
345 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
347 typedef struct opcode_t
{
348 unsigned char opc1
, opc2
, opc3
;
349 #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
350 unsigned char pad
[5];
352 unsigned char pad
[1];
354 opc_handler_t handler
;
358 /*****************************************************************************/
359 /*** Instruction decoding ***/
360 #define EXTRACT_HELPER(name, shift, nb) \
361 static inline uint32_t name(uint32_t opcode) \
363 return (opcode >> (shift)) & ((1 << (nb)) - 1); \
366 #define EXTRACT_SHELPER(name, shift, nb) \
367 static inline int32_t name(uint32_t opcode) \
369 return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
373 EXTRACT_HELPER(opc1
, 26, 6);
375 EXTRACT_HELPER(opc2
, 1, 5);
377 EXTRACT_HELPER(opc3
, 6, 5);
378 /* Update Cr0 flags */
379 EXTRACT_HELPER(Rc
, 0, 1);
381 EXTRACT_HELPER(rD
, 21, 5);
383 EXTRACT_HELPER(rS
, 21, 5);
385 EXTRACT_HELPER(rA
, 16, 5);
387 EXTRACT_HELPER(rB
, 11, 5);
389 EXTRACT_HELPER(rC
, 6, 5);
391 EXTRACT_HELPER(crfD
, 23, 3);
392 EXTRACT_HELPER(crfS
, 18, 3);
393 EXTRACT_HELPER(crbD
, 21, 5);
394 EXTRACT_HELPER(crbA
, 16, 5);
395 EXTRACT_HELPER(crbB
, 11, 5);
397 EXTRACT_HELPER(_SPR
, 11, 10);
/* Decode the SPR number: the raw 10-bit field has its two 5-bit
 * halves architecturally swapped, so swap them back. */
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t raw = _SPR(opcode);

    return ((raw >> 5) & 0x1F) | ((raw & 0x1F) << 5);
}
404 /*** Get constants ***/
405 EXTRACT_HELPER(IMM
, 12, 8);
406 /* 16 bits signed immediate value */
407 EXTRACT_SHELPER(SIMM
, 0, 16);
408 /* 16 bits unsigned immediate value */
409 EXTRACT_HELPER(UIMM
, 0, 16);
410 /* 5 bits signed immediate value */
411 EXTRACT_HELPER(SIMM5
, 16, 5);
412 /* 5 bits signed immediate value */
413 EXTRACT_HELPER(UIMM5
, 16, 5);
415 EXTRACT_HELPER(NB
, 11, 5);
417 EXTRACT_HELPER(SH
, 11, 5);
418 /* Vector shift count */
419 EXTRACT_HELPER(VSH
, 6, 4);
421 EXTRACT_HELPER(MB
, 6, 5);
423 EXTRACT_HELPER(ME
, 1, 5);
425 EXTRACT_HELPER(TO
, 21, 5);
427 EXTRACT_HELPER(CRM
, 12, 8);
428 EXTRACT_HELPER(SR
, 16, 4);
431 EXTRACT_HELPER(FPBF
, 19, 3);
432 EXTRACT_HELPER(FPIMM
, 12, 4);
433 EXTRACT_HELPER(FPL
, 21, 1);
434 EXTRACT_HELPER(FPFLM
, 17, 8);
435 EXTRACT_HELPER(FPW
, 16, 1);
437 /*** Jump target decoding ***/
439 EXTRACT_SHELPER(d
, 0, 16);
440 /* Immediate address */
441 static inline target_ulong
LI(uint32_t opcode
)
443 return (opcode
>> 0) & 0x03FFFFFC;
/* Conditional-branch displacement: low 16 bits, word aligned. */
static inline uint32_t BD(uint32_t opcode)
{
    return opcode & 0xFFFC;
}
451 EXTRACT_HELPER(BO
, 21, 5);
452 EXTRACT_HELPER(BI
, 16, 5);
453 /* Absolute/relative address */
454 EXTRACT_HELPER(AA
, 1, 1);
456 EXTRACT_HELPER(LK
, 0, 1);
458 /* Create a mask between <start> and <end> bits */
459 static inline target_ulong
MASK(uint32_t start
, uint32_t end
)
463 #if defined(TARGET_PPC64)
464 if (likely(start
== 0)) {
465 ret
= UINT64_MAX
<< (63 - end
);
466 } else if (likely(end
== 63)) {
467 ret
= UINT64_MAX
>> start
;
470 if (likely(start
== 0)) {
471 ret
= UINT32_MAX
<< (31 - end
);
472 } else if (likely(end
== 31)) {
473 ret
= UINT32_MAX
>> start
;
477 ret
= (((target_ulong
)(-1ULL)) >> (start
)) ^
478 (((target_ulong
)(-1ULL) >> (end
)) >> 1);
479 if (unlikely(start
> end
))
486 /*****************************************************************************/
487 /* PowerPC instructions table */
489 #if defined(DO_PPC_STATISTICS)
490 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
500 .handler = &gen_##name, \
501 .oname = stringify(name), \
503 .oname = stringify(name), \
505 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
516 .handler = &gen_##name, \
517 .oname = stringify(name), \
519 .oname = stringify(name), \
521 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
531 .handler = &gen_##name, \
537 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
547 .handler = &gen_##name, \
549 .oname = stringify(name), \
551 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
562 .handler = &gen_##name, \
564 .oname = stringify(name), \
566 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
576 .handler = &gen_##name, \
582 /* SPR load/store helpers */
583 static inline void gen_load_spr(TCGv t
, int reg
)
585 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUPPCState
, spr
[reg
]));
588 static inline void gen_store_spr(int reg
, TCGv t
)
590 tcg_gen_st_tl(t
, cpu_env
, offsetof(CPUPPCState
, spr
[reg
]));
593 /* Invalid instruction */
594 static void gen_invalid(DisasContext
*ctx
)
596 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
599 static opc_handler_t invalid_handler
= {
600 .inval1
= 0xFFFFFFFF,
601 .inval2
= 0xFFFFFFFF,
604 .handler
= gen_invalid
,
607 /*** Integer comparison ***/
609 static inline void gen_op_cmp(TCGv arg0
, TCGv arg1
, int s
, int crf
)
611 TCGv t0
= tcg_temp_new();
612 TCGv_i32 t1
= tcg_temp_new_i32();
614 tcg_gen_trunc_tl_i32(cpu_crf
[crf
], cpu_so
);
616 tcg_gen_setcond_tl((s ? TCG_COND_LT
: TCG_COND_LTU
), t0
, arg0
, arg1
);
617 tcg_gen_trunc_tl_i32(t1
, t0
);
618 tcg_gen_shli_i32(t1
, t1
, CRF_LT
);
619 tcg_gen_or_i32(cpu_crf
[crf
], cpu_crf
[crf
], t1
);
621 tcg_gen_setcond_tl((s ? TCG_COND_GT
: TCG_COND_GTU
), t0
, arg0
, arg1
);
622 tcg_gen_trunc_tl_i32(t1
, t0
);
623 tcg_gen_shli_i32(t1
, t1
, CRF_GT
);
624 tcg_gen_or_i32(cpu_crf
[crf
], cpu_crf
[crf
], t1
);
626 tcg_gen_setcond_tl(TCG_COND_EQ
, t0
, arg0
, arg1
);
627 tcg_gen_trunc_tl_i32(t1
, t0
);
628 tcg_gen_shli_i32(t1
, t1
, CRF_EQ
);
629 tcg_gen_or_i32(cpu_crf
[crf
], cpu_crf
[crf
], t1
);
632 tcg_temp_free_i32(t1
);
635 static inline void gen_op_cmpi(TCGv arg0
, target_ulong arg1
, int s
, int crf
)
637 TCGv t0
= tcg_const_tl(arg1
);
638 gen_op_cmp(arg0
, t0
, s
, crf
);
642 static inline void gen_op_cmp32(TCGv arg0
, TCGv arg1
, int s
, int crf
)
648 tcg_gen_ext32s_tl(t0
, arg0
);
649 tcg_gen_ext32s_tl(t1
, arg1
);
651 tcg_gen_ext32u_tl(t0
, arg0
);
652 tcg_gen_ext32u_tl(t1
, arg1
);
654 gen_op_cmp(t0
, t1
, s
, crf
);
659 static inline void gen_op_cmpi32(TCGv arg0
, target_ulong arg1
, int s
, int crf
)
661 TCGv t0
= tcg_const_tl(arg1
);
662 gen_op_cmp32(arg0
, t0
, s
, crf
);
666 static inline void gen_set_Rc0(DisasContext
*ctx
, TCGv reg
)
668 if (NARROW_MODE(ctx
)) {
669 gen_op_cmpi32(reg
, 0, 1, 0);
671 gen_op_cmpi(reg
, 0, 1, 0);
676 static void gen_cmp(DisasContext
*ctx
)
678 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
679 gen_op_cmp(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
680 1, crfD(ctx
->opcode
));
682 gen_op_cmp32(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
683 1, crfD(ctx
->opcode
));
688 static void gen_cmpi(DisasContext
*ctx
)
690 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
691 gen_op_cmpi(cpu_gpr
[rA(ctx
->opcode
)], SIMM(ctx
->opcode
),
692 1, crfD(ctx
->opcode
));
694 gen_op_cmpi32(cpu_gpr
[rA(ctx
->opcode
)], SIMM(ctx
->opcode
),
695 1, crfD(ctx
->opcode
));
700 static void gen_cmpl(DisasContext
*ctx
)
702 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
703 gen_op_cmp(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
704 0, crfD(ctx
->opcode
));
706 gen_op_cmp32(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)],
707 0, crfD(ctx
->opcode
));
712 static void gen_cmpli(DisasContext
*ctx
)
714 if ((ctx
->opcode
& 0x00200000) && (ctx
->insns_flags
& PPC_64B
)) {
715 gen_op_cmpi(cpu_gpr
[rA(ctx
->opcode
)], UIMM(ctx
->opcode
),
716 0, crfD(ctx
->opcode
));
718 gen_op_cmpi32(cpu_gpr
[rA(ctx
->opcode
)], UIMM(ctx
->opcode
),
719 0, crfD(ctx
->opcode
));
723 /* isel (PowerPC 2.03 specification) */
724 static void gen_isel(DisasContext
*ctx
)
727 uint32_t bi
= rC(ctx
->opcode
);
731 l1
= gen_new_label();
732 l2
= gen_new_label();
734 mask
= 1 << (3 - (bi
& 0x03));
735 t0
= tcg_temp_new_i32();
736 tcg_gen_andi_i32(t0
, cpu_crf
[bi
>> 2], mask
);
737 tcg_gen_brcondi_i32(TCG_COND_EQ
, t0
, 0, l1
);
738 if (rA(ctx
->opcode
) == 0)
739 tcg_gen_movi_tl(cpu_gpr
[rD(ctx
->opcode
)], 0);
741 tcg_gen_mov_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)]);
744 tcg_gen_mov_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
746 tcg_temp_free_i32(t0
);
749 /* cmpb: PowerPC 2.05 specification */
750 static void gen_cmpb(DisasContext
*ctx
)
752 gen_helper_cmpb(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)],
753 cpu_gpr
[rB(ctx
->opcode
)]);
756 /*** Integer arithmetic ***/
758 static inline void gen_op_arith_compute_ov(DisasContext
*ctx
, TCGv arg0
,
759 TCGv arg1
, TCGv arg2
, int sub
)
761 TCGv t0
= tcg_temp_new();
763 tcg_gen_xor_tl(cpu_ov
, arg0
, arg2
);
764 tcg_gen_xor_tl(t0
, arg1
, arg2
);
766 tcg_gen_and_tl(cpu_ov
, cpu_ov
, t0
);
768 tcg_gen_andc_tl(cpu_ov
, cpu_ov
, t0
);
771 if (NARROW_MODE(ctx
)) {
772 tcg_gen_ext32s_tl(cpu_ov
, cpu_ov
);
774 tcg_gen_shri_tl(cpu_ov
, cpu_ov
, TARGET_LONG_BITS
- 1);
775 tcg_gen_or_tl(cpu_so
, cpu_so
, cpu_ov
);
778 /* Common add function */
779 static inline void gen_op_arith_add(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
780 TCGv arg2
, bool add_ca
, bool compute_ca
,
781 bool compute_ov
, bool compute_rc0
)
785 if (compute_ca
|| compute_ov
) {
790 if (NARROW_MODE(ctx
)) {
791 /* Caution: a non-obvious corner case of the spec is that we
792 must produce the *entire* 64-bit addition, but produce the
793 carry into bit 32. */
794 TCGv t1
= tcg_temp_new();
795 tcg_gen_xor_tl(t1
, arg1
, arg2
); /* add without carry */
796 tcg_gen_add_tl(t0
, arg1
, arg2
);
798 tcg_gen_add_tl(t0
, t0
, cpu_ca
);
800 tcg_gen_xor_tl(cpu_ca
, t0
, t1
); /* bits changed w/ carry */
802 tcg_gen_shri_tl(cpu_ca
, cpu_ca
, 32); /* extract bit 32 */
803 tcg_gen_andi_tl(cpu_ca
, cpu_ca
, 1);
805 TCGv zero
= tcg_const_tl(0);
807 tcg_gen_add2_tl(t0
, cpu_ca
, arg1
, zero
, cpu_ca
, zero
);
808 tcg_gen_add2_tl(t0
, cpu_ca
, t0
, cpu_ca
, arg2
, zero
);
810 tcg_gen_add2_tl(t0
, cpu_ca
, arg1
, zero
, arg2
, zero
);
815 tcg_gen_add_tl(t0
, arg1
, arg2
);
817 tcg_gen_add_tl(t0
, t0
, cpu_ca
);
822 gen_op_arith_compute_ov(ctx
, t0
, arg1
, arg2
, 0);
824 if (unlikely(compute_rc0
)) {
825 gen_set_Rc0(ctx
, t0
);
828 if (!TCGV_EQUAL(t0
, ret
)) {
829 tcg_gen_mov_tl(ret
, t0
);
833 /* Add functions with two operands */
834 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
835 static void glue(gen_, name)(DisasContext *ctx) \
837 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
838 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
839 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
841 /* Add functions with one operand and one immediate */
842 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
843 add_ca, compute_ca, compute_ov) \
844 static void glue(gen_, name)(DisasContext *ctx) \
846 TCGv t0 = tcg_const_tl(const_val); \
847 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
848 cpu_gpr[rA(ctx->opcode)], t0, \
849 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
853 /* add add. addo addo. */
854 GEN_INT_ARITH_ADD(add
, 0x08, 0, 0, 0)
855 GEN_INT_ARITH_ADD(addo
, 0x18, 0, 0, 1)
856 /* addc addc. addco addco. */
857 GEN_INT_ARITH_ADD(addc
, 0x00, 0, 1, 0)
858 GEN_INT_ARITH_ADD(addco
, 0x10, 0, 1, 1)
859 /* adde adde. addeo addeo. */
860 GEN_INT_ARITH_ADD(adde
, 0x04, 1, 1, 0)
861 GEN_INT_ARITH_ADD(addeo
, 0x14, 1, 1, 1)
862 /* addme addme. addmeo addmeo. */
863 GEN_INT_ARITH_ADD_CONST(addme
, 0x07, -1LL, 1, 1, 0)
864 GEN_INT_ARITH_ADD_CONST(addmeo
, 0x17, -1LL, 1, 1, 1)
865 /* addze addze. addzeo addzeo.*/
866 GEN_INT_ARITH_ADD_CONST(addze
, 0x06, 0, 1, 1, 0)
867 GEN_INT_ARITH_ADD_CONST(addzeo
, 0x16, 0, 1, 1, 1)
869 static void gen_addi(DisasContext
*ctx
)
871 target_long simm
= SIMM(ctx
->opcode
);
873 if (rA(ctx
->opcode
) == 0) {
875 tcg_gen_movi_tl(cpu_gpr
[rD(ctx
->opcode
)], simm
);
877 tcg_gen_addi_tl(cpu_gpr
[rD(ctx
->opcode
)],
878 cpu_gpr
[rA(ctx
->opcode
)], simm
);
882 static inline void gen_op_addic(DisasContext
*ctx
, bool compute_rc0
)
884 TCGv c
= tcg_const_tl(SIMM(ctx
->opcode
));
885 gen_op_arith_add(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
886 c
, 0, 1, 0, compute_rc0
);
890 static void gen_addic(DisasContext
*ctx
)
892 gen_op_addic(ctx
, 0);
895 static void gen_addic_(DisasContext
*ctx
)
897 gen_op_addic(ctx
, 1);
901 static void gen_addis(DisasContext
*ctx
)
903 target_long simm
= SIMM(ctx
->opcode
);
905 if (rA(ctx
->opcode
) == 0) {
907 tcg_gen_movi_tl(cpu_gpr
[rD(ctx
->opcode
)], simm
<< 16);
909 tcg_gen_addi_tl(cpu_gpr
[rD(ctx
->opcode
)],
910 cpu_gpr
[rA(ctx
->opcode
)], simm
<< 16);
914 static inline void gen_op_arith_divw(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
915 TCGv arg2
, int sign
, int compute_ov
)
917 int l1
= gen_new_label();
918 int l2
= gen_new_label();
919 TCGv_i32 t0
= tcg_temp_local_new_i32();
920 TCGv_i32 t1
= tcg_temp_local_new_i32();
922 tcg_gen_trunc_tl_i32(t0
, arg1
);
923 tcg_gen_trunc_tl_i32(t1
, arg2
);
924 tcg_gen_brcondi_i32(TCG_COND_EQ
, t1
, 0, l1
);
926 int l3
= gen_new_label();
927 tcg_gen_brcondi_i32(TCG_COND_NE
, t1
, -1, l3
);
928 tcg_gen_brcondi_i32(TCG_COND_EQ
, t0
, INT32_MIN
, l1
);
930 tcg_gen_div_i32(t0
, t0
, t1
);
932 tcg_gen_divu_i32(t0
, t0
, t1
);
935 tcg_gen_movi_tl(cpu_ov
, 0);
940 tcg_gen_sari_i32(t0
, t0
, 31);
942 tcg_gen_movi_i32(t0
, 0);
945 tcg_gen_movi_tl(cpu_ov
, 1);
946 tcg_gen_movi_tl(cpu_so
, 1);
949 tcg_gen_extu_i32_tl(ret
, t0
);
950 tcg_temp_free_i32(t0
);
951 tcg_temp_free_i32(t1
);
952 if (unlikely(Rc(ctx
->opcode
) != 0))
953 gen_set_Rc0(ctx
, ret
);
956 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
957 static void glue(gen_, name)(DisasContext *ctx) \
959 gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
960 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
963 /* divwu divwu. divwuo divwuo. */
964 GEN_INT_ARITH_DIVW(divwu
, 0x0E, 0, 0);
965 GEN_INT_ARITH_DIVW(divwuo
, 0x1E, 0, 1);
966 /* divw divw. divwo divwo. */
967 GEN_INT_ARITH_DIVW(divw
, 0x0F, 1, 0);
968 GEN_INT_ARITH_DIVW(divwo
, 0x1F, 1, 1);
969 #if defined(TARGET_PPC64)
970 static inline void gen_op_arith_divd(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
971 TCGv arg2
, int sign
, int compute_ov
)
973 int l1
= gen_new_label();
974 int l2
= gen_new_label();
976 tcg_gen_brcondi_i64(TCG_COND_EQ
, arg2
, 0, l1
);
978 int l3
= gen_new_label();
979 tcg_gen_brcondi_i64(TCG_COND_NE
, arg2
, -1, l3
);
980 tcg_gen_brcondi_i64(TCG_COND_EQ
, arg1
, INT64_MIN
, l1
);
982 tcg_gen_div_i64(ret
, arg1
, arg2
);
984 tcg_gen_divu_i64(ret
, arg1
, arg2
);
987 tcg_gen_movi_tl(cpu_ov
, 0);
992 tcg_gen_sari_i64(ret
, arg1
, 63);
994 tcg_gen_movi_i64(ret
, 0);
997 tcg_gen_movi_tl(cpu_ov
, 1);
998 tcg_gen_movi_tl(cpu_so
, 1);
1001 if (unlikely(Rc(ctx
->opcode
) != 0))
1002 gen_set_Rc0(ctx
, ret
);
1004 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
1005 static void glue(gen_, name)(DisasContext *ctx) \
1007 gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
1008 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1009 sign, compute_ov); \
1011 /* divdu divdu. divduo divduo. */
1012 GEN_INT_ARITH_DIVD(divdu
, 0x0E, 0, 0);
1013 GEN_INT_ARITH_DIVD(divduo
, 0x1E, 0, 1);
1014 /* divd divd. divdo divdo. */
1015 GEN_INT_ARITH_DIVD(divd
, 0x0F, 1, 0);
1016 GEN_INT_ARITH_DIVD(divdo
, 0x1F, 1, 1);
1020 static void gen_mulhw(DisasContext
*ctx
)
1022 TCGv_i32 t0
= tcg_temp_new_i32();
1023 TCGv_i32 t1
= tcg_temp_new_i32();
1025 tcg_gen_trunc_tl_i32(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1026 tcg_gen_trunc_tl_i32(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1027 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
1028 tcg_gen_extu_i32_tl(cpu_gpr
[rD(ctx
->opcode
)], t1
);
1029 tcg_temp_free_i32(t0
);
1030 tcg_temp_free_i32(t1
);
1031 if (unlikely(Rc(ctx
->opcode
) != 0))
1032 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1035 /* mulhwu mulhwu. */
1036 static void gen_mulhwu(DisasContext
*ctx
)
1038 TCGv_i32 t0
= tcg_temp_new_i32();
1039 TCGv_i32 t1
= tcg_temp_new_i32();
1041 tcg_gen_trunc_tl_i32(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1042 tcg_gen_trunc_tl_i32(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1043 tcg_gen_mulu2_i32(t0
, t1
, t0
, t1
);
1044 tcg_gen_extu_i32_tl(cpu_gpr
[rD(ctx
->opcode
)], t1
);
1045 tcg_temp_free_i32(t0
);
1046 tcg_temp_free_i32(t1
);
1047 if (unlikely(Rc(ctx
->opcode
) != 0))
1048 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1052 static void gen_mullw(DisasContext
*ctx
)
1054 tcg_gen_mul_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1055 cpu_gpr
[rB(ctx
->opcode
)]);
1056 tcg_gen_ext32s_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rD(ctx
->opcode
)]);
1057 if (unlikely(Rc(ctx
->opcode
) != 0))
1058 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1061 /* mullwo mullwo. */
1062 static void gen_mullwo(DisasContext
*ctx
)
1064 TCGv_i32 t0
= tcg_temp_new_i32();
1065 TCGv_i32 t1
= tcg_temp_new_i32();
1067 tcg_gen_trunc_tl_i32(t0
, cpu_gpr
[rA(ctx
->opcode
)]);
1068 tcg_gen_trunc_tl_i32(t1
, cpu_gpr
[rB(ctx
->opcode
)]);
1069 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
1070 tcg_gen_ext_i32_tl(cpu_gpr
[rD(ctx
->opcode
)], t0
);
1072 tcg_gen_sari_i32(t0
, t0
, 31);
1073 tcg_gen_setcond_i32(TCG_COND_NE
, t0
, t0
, t1
);
1074 tcg_gen_extu_i32_tl(cpu_ov
, t0
);
1075 tcg_gen_or_tl(cpu_so
, cpu_so
, cpu_ov
);
1077 tcg_temp_free_i32(t0
);
1078 tcg_temp_free_i32(t1
);
1079 if (unlikely(Rc(ctx
->opcode
) != 0))
1080 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1084 static void gen_mulli(DisasContext
*ctx
)
1086 tcg_gen_muli_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1090 #if defined(TARGET_PPC64)
1092 static void gen_mulhd(DisasContext
*ctx
)
1094 TCGv lo
= tcg_temp_new();
1095 tcg_gen_muls2_tl(lo
, cpu_gpr
[rD(ctx
->opcode
)],
1096 cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1098 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1099 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1103 /* mulhdu mulhdu. */
1104 static void gen_mulhdu(DisasContext
*ctx
)
1106 TCGv lo
= tcg_temp_new();
1107 tcg_gen_mulu2_tl(lo
, cpu_gpr
[rD(ctx
->opcode
)],
1108 cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1110 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1111 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1116 static void gen_mulld(DisasContext
*ctx
)
1118 tcg_gen_mul_tl(cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1119 cpu_gpr
[rB(ctx
->opcode
)]);
1120 if (unlikely(Rc(ctx
->opcode
) != 0))
1121 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1124 /* mulldo mulldo. */
1125 static void gen_mulldo(DisasContext
*ctx
)
1127 gen_helper_mulldo(cpu_gpr
[rD(ctx
->opcode
)], cpu_env
,
1128 cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1129 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1130 gen_set_Rc0(ctx
, cpu_gpr
[rD(ctx
->opcode
)]);
1135 /* Common subf function */
1136 static inline void gen_op_arith_subf(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
1137 TCGv arg2
, bool add_ca
, bool compute_ca
,
1138 bool compute_ov
, bool compute_rc0
)
1142 if (compute_ca
|| compute_ov
) {
1143 t0
= tcg_temp_new();
1147 /* dest = ~arg1 + arg2 [+ ca]. */
1148 if (NARROW_MODE(ctx
)) {
1149 /* Caution: a non-obvious corner case of the spec is that we
1150 must produce the *entire* 64-bit addition, but produce the
1151 carry into bit 32. */
1152 TCGv inv1
= tcg_temp_new();
1153 TCGv t1
= tcg_temp_new();
1154 tcg_gen_not_tl(inv1
, arg1
);
1156 tcg_gen_add_tl(t0
, arg2
, cpu_ca
);
1158 tcg_gen_addi_tl(t0
, arg2
, 1);
1160 tcg_gen_xor_tl(t1
, arg2
, inv1
); /* add without carry */
1161 tcg_gen_add_tl(t0
, t0
, inv1
);
1162 tcg_gen_xor_tl(cpu_ca
, t0
, t1
); /* bits changes w/ carry */
1164 tcg_gen_shri_tl(cpu_ca
, cpu_ca
, 32); /* extract bit 32 */
1165 tcg_gen_andi_tl(cpu_ca
, cpu_ca
, 1);
1166 } else if (add_ca
) {
1167 TCGv zero
, inv1
= tcg_temp_new();
1168 tcg_gen_not_tl(inv1
, arg1
);
1169 zero
= tcg_const_tl(0);
1170 tcg_gen_add2_tl(t0
, cpu_ca
, arg2
, zero
, cpu_ca
, zero
);
1171 tcg_gen_add2_tl(t0
, cpu_ca
, t0
, cpu_ca
, inv1
, zero
);
1172 tcg_temp_free(zero
);
1173 tcg_temp_free(inv1
);
1175 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_ca
, arg2
, arg1
);
1176 tcg_gen_sub_tl(t0
, arg2
, arg1
);
1178 } else if (add_ca
) {
1179 /* Since we're ignoring carry-out, we can simplify the
1180 standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
1181 tcg_gen_sub_tl(t0
, arg2
, arg1
);
1182 tcg_gen_add_tl(t0
, t0
, cpu_ca
);
1183 tcg_gen_subi_tl(t0
, t0
, 1);
1185 tcg_gen_sub_tl(t0
, arg2
, arg1
);
1189 gen_op_arith_compute_ov(ctx
, t0
, arg1
, arg2
, 1);
1191 if (unlikely(compute_rc0
)) {
1192 gen_set_Rc0(ctx
, t0
);
1195 if (!TCGV_EQUAL(t0
, ret
)) {
1196 tcg_gen_mov_tl(ret
, t0
);
1200 /* Sub functions with Two operands functions */
1201 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
1202 static void glue(gen_, name)(DisasContext *ctx) \
1204 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
1205 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1206 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
1208 /* Sub functions with one operand and one immediate */
1209 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
1210 add_ca, compute_ca, compute_ov) \
1211 static void glue(gen_, name)(DisasContext *ctx) \
1213 TCGv t0 = tcg_const_tl(const_val); \
1214 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
1215 cpu_gpr[rA(ctx->opcode)], t0, \
1216 add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
1217 tcg_temp_free(t0); \
1219 /* subf subf. subfo subfo. */
1220 GEN_INT_ARITH_SUBF(subf
, 0x01, 0, 0, 0)
1221 GEN_INT_ARITH_SUBF(subfo
, 0x11, 0, 0, 1)
1222 /* subfc subfc. subfco subfco. */
1223 GEN_INT_ARITH_SUBF(subfc
, 0x00, 0, 1, 0)
1224 GEN_INT_ARITH_SUBF(subfco
, 0x10, 0, 1, 1)
1225 /* subfe subfe. subfeo subfo. */
1226 GEN_INT_ARITH_SUBF(subfe
, 0x04, 1, 1, 0)
1227 GEN_INT_ARITH_SUBF(subfeo
, 0x14, 1, 1, 1)
1228 /* subfme subfme. subfmeo subfmeo. */
1229 GEN_INT_ARITH_SUBF_CONST(subfme
, 0x07, -1LL, 1, 1, 0)
1230 GEN_INT_ARITH_SUBF_CONST(subfmeo
, 0x17, -1LL, 1, 1, 1)
1231 /* subfze subfze. subfzeo subfzeo.*/
1232 GEN_INT_ARITH_SUBF_CONST(subfze
, 0x06, 0, 1, 1, 0)
1233 GEN_INT_ARITH_SUBF_CONST(subfzeo
, 0x16, 0, 1, 1, 1)
1236 static void gen_subfic(DisasContext
*ctx
)
1238 TCGv c
= tcg_const_tl(SIMM(ctx
->opcode
));
1239 gen_op_arith_subf(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1244 /* neg neg. nego nego. */
1245 static inline void gen_op_arith_neg(DisasContext
*ctx
, bool compute_ov
)
1247 TCGv zero
= tcg_const_tl(0);
1248 gen_op_arith_subf(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)],
1249 zero
, 0, 0, compute_ov
, Rc(ctx
->opcode
));
1250 tcg_temp_free(zero
);
1253 static void gen_neg(DisasContext
*ctx
)
1255 gen_op_arith_neg(ctx
, 0);
1258 static void gen_nego(DisasContext
*ctx
)
1260 gen_op_arith_neg(ctx
, 1);
1263 /*** Integer logical ***/
1264 #define GEN_LOGICAL2(name, tcg_op, opc, type) \
1265 static void glue(gen_, name)(DisasContext *ctx) \
1267 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
1268 cpu_gpr[rB(ctx->opcode)]); \
1269 if (unlikely(Rc(ctx->opcode) != 0)) \
1270 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1273 #define GEN_LOGICAL1(name, tcg_op, opc, type) \
1274 static void glue(gen_, name)(DisasContext *ctx) \
1276 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
1277 if (unlikely(Rc(ctx->opcode) != 0)) \
1278 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1282 GEN_LOGICAL2(and, tcg_gen_and_tl
, 0x00, PPC_INTEGER
);
1284 GEN_LOGICAL2(andc
, tcg_gen_andc_tl
, 0x01, PPC_INTEGER
);
1287 static void gen_andi_(DisasContext
*ctx
)
1289 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], UIMM(ctx
->opcode
));
1290 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1294 static void gen_andis_(DisasContext
*ctx
)
1296 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], UIMM(ctx
->opcode
) << 16);
1297 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1301 static void gen_cntlzw(DisasContext
*ctx
)
1303 gen_helper_cntlzw(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1304 if (unlikely(Rc(ctx
->opcode
) != 0))
1305 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1308 GEN_LOGICAL2(eqv
, tcg_gen_eqv_tl
, 0x08, PPC_INTEGER
);
1309 /* extsb & extsb. */
1310 GEN_LOGICAL1(extsb
, tcg_gen_ext8s_tl
, 0x1D, PPC_INTEGER
);
1311 /* extsh & extsh. */
1312 GEN_LOGICAL1(extsh
, tcg_gen_ext16s_tl
, 0x1C, PPC_INTEGER
);
1314 GEN_LOGICAL2(nand
, tcg_gen_nand_tl
, 0x0E, PPC_INTEGER
);
1316 GEN_LOGICAL2(nor
, tcg_gen_nor_tl
, 0x03, PPC_INTEGER
);
1319 static void gen_or(DisasContext
*ctx
)
1323 rs
= rS(ctx
->opcode
);
1324 ra
= rA(ctx
->opcode
);
1325 rb
= rB(ctx
->opcode
);
1326 /* Optimisation for mr. ri case */
1327 if (rs
!= ra
|| rs
!= rb
) {
1329 tcg_gen_or_tl(cpu_gpr
[ra
], cpu_gpr
[rs
], cpu_gpr
[rb
]);
1331 tcg_gen_mov_tl(cpu_gpr
[ra
], cpu_gpr
[rs
]);
1332 if (unlikely(Rc(ctx
->opcode
) != 0))
1333 gen_set_Rc0(ctx
, cpu_gpr
[ra
]);
1334 } else if (unlikely(Rc(ctx
->opcode
) != 0)) {
1335 gen_set_Rc0(ctx
, cpu_gpr
[rs
]);
1336 #if defined(TARGET_PPC64)
1342 /* Set process priority to low */
1346 /* Set process priority to medium-low */
1350 /* Set process priority to normal */
1353 #if !defined(CONFIG_USER_ONLY)
1355 if (ctx
->mem_idx
> 0) {
1356 /* Set process priority to very low */
1361 if (ctx
->mem_idx
> 0) {
1362 /* Set process priority to medium-hight */
1367 if (ctx
->mem_idx
> 0) {
1368 /* Set process priority to high */
1373 if (ctx
->mem_idx
> 1) {
1374 /* Set process priority to very high */
1384 TCGv t0
= tcg_temp_new();
1385 gen_load_spr(t0
, SPR_PPR
);
1386 tcg_gen_andi_tl(t0
, t0
, ~0x001C000000000000ULL
);
1387 tcg_gen_ori_tl(t0
, t0
, ((uint64_t)prio
) << 50);
1388 gen_store_spr(SPR_PPR
, t0
);
1395 GEN_LOGICAL2(orc
, tcg_gen_orc_tl
, 0x0C, PPC_INTEGER
);
1398 static void gen_xor(DisasContext
*ctx
)
1400 /* Optimisation for "set to zero" case */
1401 if (rS(ctx
->opcode
) != rB(ctx
->opcode
))
1402 tcg_gen_xor_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1404 tcg_gen_movi_tl(cpu_gpr
[rA(ctx
->opcode
)], 0);
1405 if (unlikely(Rc(ctx
->opcode
) != 0))
1406 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1410 static void gen_ori(DisasContext
*ctx
)
1412 target_ulong uimm
= UIMM(ctx
->opcode
);
1414 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1416 /* XXX: should handle special NOPs for POWER series */
1419 tcg_gen_ori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
);
1423 static void gen_oris(DisasContext
*ctx
)
1425 target_ulong uimm
= UIMM(ctx
->opcode
);
1427 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1431 tcg_gen_ori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
<< 16);
1435 static void gen_xori(DisasContext
*ctx
)
1437 target_ulong uimm
= UIMM(ctx
->opcode
);
1439 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1443 tcg_gen_xori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
);
1447 static void gen_xoris(DisasContext
*ctx
)
1449 target_ulong uimm
= UIMM(ctx
->opcode
);
1451 if (rS(ctx
->opcode
) == rA(ctx
->opcode
) && uimm
== 0) {
1455 tcg_gen_xori_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], uimm
<< 16);
1458 /* popcntb : PowerPC 2.03 specification */
1459 static void gen_popcntb(DisasContext
*ctx
)
1461 gen_helper_popcntb(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1464 static void gen_popcntw(DisasContext
*ctx
)
1466 gen_helper_popcntw(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1469 #if defined(TARGET_PPC64)
1470 /* popcntd: PowerPC 2.06 specification */
1471 static void gen_popcntd(DisasContext
*ctx
)
1473 gen_helper_popcntd(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1477 /* prtyw: PowerPC 2.05 specification */
1478 static void gen_prtyw(DisasContext
*ctx
)
1480 TCGv ra
= cpu_gpr
[rA(ctx
->opcode
)];
1481 TCGv rs
= cpu_gpr
[rS(ctx
->opcode
)];
1482 TCGv t0
= tcg_temp_new();
1483 tcg_gen_shri_tl(t0
, rs
, 16);
1484 tcg_gen_xor_tl(ra
, rs
, t0
);
1485 tcg_gen_shri_tl(t0
, ra
, 8);
1486 tcg_gen_xor_tl(ra
, ra
, t0
);
1487 tcg_gen_andi_tl(ra
, ra
, (target_ulong
)0x100000001ULL
);
1491 #if defined(TARGET_PPC64)
1492 /* prtyd: PowerPC 2.05 specification */
1493 static void gen_prtyd(DisasContext
*ctx
)
1495 TCGv ra
= cpu_gpr
[rA(ctx
->opcode
)];
1496 TCGv rs
= cpu_gpr
[rS(ctx
->opcode
)];
1497 TCGv t0
= tcg_temp_new();
1498 tcg_gen_shri_tl(t0
, rs
, 32);
1499 tcg_gen_xor_tl(ra
, rs
, t0
);
1500 tcg_gen_shri_tl(t0
, ra
, 16);
1501 tcg_gen_xor_tl(ra
, ra
, t0
);
1502 tcg_gen_shri_tl(t0
, ra
, 8);
1503 tcg_gen_xor_tl(ra
, ra
, t0
);
1504 tcg_gen_andi_tl(ra
, ra
, 1);
1509 #if defined(TARGET_PPC64)
1510 /* extsw & extsw. */
1511 GEN_LOGICAL1(extsw
, tcg_gen_ext32s_tl
, 0x1E, PPC_64B
);
1514 static void gen_cntlzd(DisasContext
*ctx
)
1516 gen_helper_cntlzd(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1517 if (unlikely(Rc(ctx
->opcode
) != 0))
1518 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1522 /*** Integer rotate ***/
1524 /* rlwimi & rlwimi. */
1525 static void gen_rlwimi(DisasContext
*ctx
)
1527 uint32_t mb
, me
, sh
;
1529 mb
= MB(ctx
->opcode
);
1530 me
= ME(ctx
->opcode
);
1531 sh
= SH(ctx
->opcode
);
1532 if (likely(sh
== 0 && mb
== 0 && me
== 31)) {
1533 tcg_gen_ext32u_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1537 TCGv t0
= tcg_temp_new();
1538 #if defined(TARGET_PPC64)
1539 TCGv_i32 t2
= tcg_temp_new_i32();
1540 tcg_gen_trunc_i64_i32(t2
, cpu_gpr
[rS(ctx
->opcode
)]);
1541 tcg_gen_rotli_i32(t2
, t2
, sh
);
1542 tcg_gen_extu_i32_i64(t0
, t2
);
1543 tcg_temp_free_i32(t2
);
1545 tcg_gen_rotli_i32(t0
, cpu_gpr
[rS(ctx
->opcode
)], sh
);
1547 #if defined(TARGET_PPC64)
1551 mask
= MASK(mb
, me
);
1552 t1
= tcg_temp_new();
1553 tcg_gen_andi_tl(t0
, t0
, mask
);
1554 tcg_gen_andi_tl(t1
, cpu_gpr
[rA(ctx
->opcode
)], ~mask
);
1555 tcg_gen_or_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1559 if (unlikely(Rc(ctx
->opcode
) != 0))
1560 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1563 /* rlwinm & rlwinm. */
1564 static void gen_rlwinm(DisasContext
*ctx
)
1566 uint32_t mb
, me
, sh
;
1568 sh
= SH(ctx
->opcode
);
1569 mb
= MB(ctx
->opcode
);
1570 me
= ME(ctx
->opcode
);
1572 if (likely(mb
== 0 && me
== (31 - sh
))) {
1573 if (likely(sh
== 0)) {
1574 tcg_gen_ext32u_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1576 TCGv t0
= tcg_temp_new();
1577 tcg_gen_ext32u_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)]);
1578 tcg_gen_shli_tl(t0
, t0
, sh
);
1579 tcg_gen_ext32u_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
);
1582 } else if (likely(sh
!= 0 && me
== 31 && sh
== (32 - mb
))) {
1583 TCGv t0
= tcg_temp_new();
1584 tcg_gen_ext32u_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)]);
1585 tcg_gen_shri_tl(t0
, t0
, mb
);
1586 tcg_gen_ext32u_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
);
1589 TCGv t0
= tcg_temp_new();
1590 #if defined(TARGET_PPC64)
1591 TCGv_i32 t1
= tcg_temp_new_i32();
1592 tcg_gen_trunc_i64_i32(t1
, cpu_gpr
[rS(ctx
->opcode
)]);
1593 tcg_gen_rotli_i32(t1
, t1
, sh
);
1594 tcg_gen_extu_i32_i64(t0
, t1
);
1595 tcg_temp_free_i32(t1
);
1597 tcg_gen_rotli_i32(t0
, cpu_gpr
[rS(ctx
->opcode
)], sh
);
1599 #if defined(TARGET_PPC64)
1603 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, MASK(mb
, me
));
1606 if (unlikely(Rc(ctx
->opcode
) != 0))
1607 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1610 /* rlwnm & rlwnm. */
1611 static void gen_rlwnm(DisasContext
*ctx
)
1615 #if defined(TARGET_PPC64)
1619 mb
= MB(ctx
->opcode
);
1620 me
= ME(ctx
->opcode
);
1621 t0
= tcg_temp_new();
1622 tcg_gen_andi_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x1f);
1623 #if defined(TARGET_PPC64)
1624 t1
= tcg_temp_new_i32();
1625 t2
= tcg_temp_new_i32();
1626 tcg_gen_trunc_i64_i32(t1
, cpu_gpr
[rS(ctx
->opcode
)]);
1627 tcg_gen_trunc_i64_i32(t2
, t0
);
1628 tcg_gen_rotl_i32(t1
, t1
, t2
);
1629 tcg_gen_extu_i32_i64(t0
, t1
);
1630 tcg_temp_free_i32(t1
);
1631 tcg_temp_free_i32(t2
);
1633 tcg_gen_rotl_i32(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1635 if (unlikely(mb
!= 0 || me
!= 31)) {
1636 #if defined(TARGET_PPC64)
1640 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, MASK(mb
, me
));
1642 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
);
1645 if (unlikely(Rc(ctx
->opcode
) != 0))
1646 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1649 #if defined(TARGET_PPC64)
1650 #define GEN_PPC64_R2(name, opc1, opc2) \
1651 static void glue(gen_, name##0)(DisasContext *ctx) \
1653 gen_##name(ctx, 0); \
1656 static void glue(gen_, name##1)(DisasContext *ctx) \
1658 gen_##name(ctx, 1); \
1660 #define GEN_PPC64_R4(name, opc1, opc2) \
1661 static void glue(gen_, name##0)(DisasContext *ctx) \
1663 gen_##name(ctx, 0, 0); \
1666 static void glue(gen_, name##1)(DisasContext *ctx) \
1668 gen_##name(ctx, 0, 1); \
1671 static void glue(gen_, name##2)(DisasContext *ctx) \
1673 gen_##name(ctx, 1, 0); \
1676 static void glue(gen_, name##3)(DisasContext *ctx) \
1678 gen_##name(ctx, 1, 1); \
1681 static inline void gen_rldinm(DisasContext
*ctx
, uint32_t mb
, uint32_t me
,
1684 if (likely(sh
!= 0 && mb
== 0 && me
== (63 - sh
))) {
1685 tcg_gen_shli_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], sh
);
1686 } else if (likely(sh
!= 0 && me
== 63 && sh
== (64 - mb
))) {
1687 tcg_gen_shri_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)], mb
);
1689 TCGv t0
= tcg_temp_new();
1690 tcg_gen_rotli_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], sh
);
1691 if (likely(mb
== 0 && me
== 63)) {
1692 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
);
1694 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, MASK(mb
, me
));
1698 if (unlikely(Rc(ctx
->opcode
) != 0))
1699 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1701 /* rldicl - rldicl. */
1702 static inline void gen_rldicl(DisasContext
*ctx
, int mbn
, int shn
)
1706 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1707 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1708 gen_rldinm(ctx
, mb
, 63, sh
);
1710 GEN_PPC64_R4(rldicl
, 0x1E, 0x00);
1711 /* rldicr - rldicr. */
1712 static inline void gen_rldicr(DisasContext
*ctx
, int men
, int shn
)
1716 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1717 me
= MB(ctx
->opcode
) | (men
<< 5);
1718 gen_rldinm(ctx
, 0, me
, sh
);
1720 GEN_PPC64_R4(rldicr
, 0x1E, 0x02);
1721 /* rldic - rldic. */
1722 static inline void gen_rldic(DisasContext
*ctx
, int mbn
, int shn
)
1726 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1727 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1728 gen_rldinm(ctx
, mb
, 63 - sh
, sh
);
1730 GEN_PPC64_R4(rldic
, 0x1E, 0x04);
1732 static inline void gen_rldnm(DisasContext
*ctx
, uint32_t mb
, uint32_t me
)
1736 t0
= tcg_temp_new();
1737 tcg_gen_andi_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x3f);
1738 tcg_gen_rotl_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1739 if (unlikely(mb
!= 0 || me
!= 63)) {
1740 tcg_gen_andi_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, MASK(mb
, me
));
1742 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
);
1745 if (unlikely(Rc(ctx
->opcode
) != 0))
1746 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1749 /* rldcl - rldcl. */
1750 static inline void gen_rldcl(DisasContext
*ctx
, int mbn
)
1754 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1755 gen_rldnm(ctx
, mb
, 63);
1757 GEN_PPC64_R2(rldcl
, 0x1E, 0x08);
1758 /* rldcr - rldcr. */
1759 static inline void gen_rldcr(DisasContext
*ctx
, int men
)
1763 me
= MB(ctx
->opcode
) | (men
<< 5);
1764 gen_rldnm(ctx
, 0, me
);
1766 GEN_PPC64_R2(rldcr
, 0x1E, 0x09);
1767 /* rldimi - rldimi. */
1768 static inline void gen_rldimi(DisasContext
*ctx
, int mbn
, int shn
)
1770 uint32_t sh
, mb
, me
;
1772 sh
= SH(ctx
->opcode
) | (shn
<< 5);
1773 mb
= MB(ctx
->opcode
) | (mbn
<< 5);
1775 if (unlikely(sh
== 0 && mb
== 0)) {
1776 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1781 t0
= tcg_temp_new();
1782 tcg_gen_rotli_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], sh
);
1783 t1
= tcg_temp_new();
1784 mask
= MASK(mb
, me
);
1785 tcg_gen_andi_tl(t0
, t0
, mask
);
1786 tcg_gen_andi_tl(t1
, cpu_gpr
[rA(ctx
->opcode
)], ~mask
);
1787 tcg_gen_or_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1791 if (unlikely(Rc(ctx
->opcode
) != 0))
1792 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1794 GEN_PPC64_R4(rldimi
, 0x1E, 0x06);
1797 /*** Integer shift ***/
1800 static void gen_slw(DisasContext
*ctx
)
1804 t0
= tcg_temp_new();
1805 /* AND rS with a mask that is 0 when rB >= 0x20 */
1806 #if defined(TARGET_PPC64)
1807 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x3a);
1808 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1810 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x1a);
1811 tcg_gen_sari_tl(t0
, t0
, 0x1f);
1813 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1814 t1
= tcg_temp_new();
1815 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x1f);
1816 tcg_gen_shl_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1819 tcg_gen_ext32u_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)]);
1820 if (unlikely(Rc(ctx
->opcode
) != 0))
1821 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1825 static void gen_sraw(DisasContext
*ctx
)
1827 gen_helper_sraw(cpu_gpr
[rA(ctx
->opcode
)], cpu_env
,
1828 cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1829 if (unlikely(Rc(ctx
->opcode
) != 0))
1830 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1833 /* srawi & srawi. */
1834 static void gen_srawi(DisasContext
*ctx
)
1836 int sh
= SH(ctx
->opcode
);
1837 TCGv dst
= cpu_gpr
[rA(ctx
->opcode
)];
1838 TCGv src
= cpu_gpr
[rS(ctx
->opcode
)];
1840 tcg_gen_mov_tl(dst
, src
);
1841 tcg_gen_movi_tl(cpu_ca
, 0);
1844 tcg_gen_ext32s_tl(dst
, src
);
1845 tcg_gen_andi_tl(cpu_ca
, dst
, (1ULL << sh
) - 1);
1846 t0
= tcg_temp_new();
1847 tcg_gen_sari_tl(t0
, dst
, TARGET_LONG_BITS
- 1);
1848 tcg_gen_and_tl(cpu_ca
, cpu_ca
, t0
);
1850 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_ca
, cpu_ca
, 0);
1851 tcg_gen_sari_tl(dst
, dst
, sh
);
1853 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1854 gen_set_Rc0(ctx
, dst
);
1859 static void gen_srw(DisasContext
*ctx
)
1863 t0
= tcg_temp_new();
1864 /* AND rS with a mask that is 0 when rB >= 0x20 */
1865 #if defined(TARGET_PPC64)
1866 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x3a);
1867 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1869 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x1a);
1870 tcg_gen_sari_tl(t0
, t0
, 0x1f);
1872 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1873 tcg_gen_ext32u_tl(t0
, t0
);
1874 t1
= tcg_temp_new();
1875 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x1f);
1876 tcg_gen_shr_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1879 if (unlikely(Rc(ctx
->opcode
) != 0))
1880 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1883 #if defined(TARGET_PPC64)
1885 static void gen_sld(DisasContext
*ctx
)
1889 t0
= tcg_temp_new();
1890 /* AND rS with a mask that is 0 when rB >= 0x40 */
1891 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x39);
1892 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1893 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1894 t1
= tcg_temp_new();
1895 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x3f);
1896 tcg_gen_shl_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1899 if (unlikely(Rc(ctx
->opcode
) != 0))
1900 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1904 static void gen_srad(DisasContext
*ctx
)
1906 gen_helper_srad(cpu_gpr
[rA(ctx
->opcode
)], cpu_env
,
1907 cpu_gpr
[rS(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
1908 if (unlikely(Rc(ctx
->opcode
) != 0))
1909 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1911 /* sradi & sradi. */
1912 static inline void gen_sradi(DisasContext
*ctx
, int n
)
1914 int sh
= SH(ctx
->opcode
) + (n
<< 5);
1915 TCGv dst
= cpu_gpr
[rA(ctx
->opcode
)];
1916 TCGv src
= cpu_gpr
[rS(ctx
->opcode
)];
1918 tcg_gen_mov_tl(dst
, src
);
1919 tcg_gen_movi_tl(cpu_ca
, 0);
1922 tcg_gen_andi_tl(cpu_ca
, src
, (1ULL << sh
) - 1);
1923 t0
= tcg_temp_new();
1924 tcg_gen_sari_tl(t0
, src
, TARGET_LONG_BITS
- 1);
1925 tcg_gen_and_tl(cpu_ca
, cpu_ca
, t0
);
1927 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_ca
, cpu_ca
, 0);
1928 tcg_gen_sari_tl(dst
, src
, sh
);
1930 if (unlikely(Rc(ctx
->opcode
) != 0)) {
1931 gen_set_Rc0(ctx
, dst
);
1935 static void gen_sradi0(DisasContext
*ctx
)
1940 static void gen_sradi1(DisasContext
*ctx
)
1946 static void gen_srd(DisasContext
*ctx
)
1950 t0
= tcg_temp_new();
1951 /* AND rS with a mask that is 0 when rB >= 0x40 */
1952 tcg_gen_shli_tl(t0
, cpu_gpr
[rB(ctx
->opcode
)], 0x39);
1953 tcg_gen_sari_tl(t0
, t0
, 0x3f);
1954 tcg_gen_andc_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
1955 t1
= tcg_temp_new();
1956 tcg_gen_andi_tl(t1
, cpu_gpr
[rB(ctx
->opcode
)], 0x3f);
1957 tcg_gen_shr_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, t1
);
1960 if (unlikely(Rc(ctx
->opcode
) != 0))
1961 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
1965 /*** Floating-Point arithmetic ***/
1966 #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
1967 static void gen_f##name(DisasContext *ctx) \
1969 if (unlikely(!ctx->fpu_enabled)) { \
1970 gen_exception(ctx, POWERPC_EXCP_FPU); \
1973 /* NIP cannot be restored if the memory exception comes from an helper */ \
1974 gen_update_nip(ctx, ctx->nip - 4); \
1975 gen_reset_fpstatus(); \
1976 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
1977 cpu_fpr[rA(ctx->opcode)], \
1978 cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
1980 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
1981 cpu_fpr[rD(ctx->opcode)]); \
1983 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
1984 Rc(ctx->opcode) != 0); \
1987 #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
1988 _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
1989 _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
1991 #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
1992 static void gen_f##name(DisasContext *ctx) \
1994 if (unlikely(!ctx->fpu_enabled)) { \
1995 gen_exception(ctx, POWERPC_EXCP_FPU); \
1998 /* NIP cannot be restored if the memory exception comes from an helper */ \
1999 gen_update_nip(ctx, ctx->nip - 4); \
2000 gen_reset_fpstatus(); \
2001 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2002 cpu_fpr[rA(ctx->opcode)], \
2003 cpu_fpr[rB(ctx->opcode)]); \
2005 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2006 cpu_fpr[rD(ctx->opcode)]); \
2008 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2009 set_fprf, Rc(ctx->opcode) != 0); \
2011 #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2012 _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2013 _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2015 #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2016 static void gen_f##name(DisasContext *ctx) \
2018 if (unlikely(!ctx->fpu_enabled)) { \
2019 gen_exception(ctx, POWERPC_EXCP_FPU); \
2022 /* NIP cannot be restored if the memory exception comes from an helper */ \
2023 gen_update_nip(ctx, ctx->nip - 4); \
2024 gen_reset_fpstatus(); \
2025 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2026 cpu_fpr[rA(ctx->opcode)], \
2027 cpu_fpr[rC(ctx->opcode)]); \
2029 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2030 cpu_fpr[rD(ctx->opcode)]); \
2032 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2033 set_fprf, Rc(ctx->opcode) != 0); \
2035 #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2036 _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2037 _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2039 #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2040 static void gen_f##name(DisasContext *ctx) \
2042 if (unlikely(!ctx->fpu_enabled)) { \
2043 gen_exception(ctx, POWERPC_EXCP_FPU); \
2046 /* NIP cannot be restored if the memory exception comes from an helper */ \
2047 gen_update_nip(ctx, ctx->nip - 4); \
2048 gen_reset_fpstatus(); \
2049 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2050 cpu_fpr[rB(ctx->opcode)]); \
2051 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2052 set_fprf, Rc(ctx->opcode) != 0); \
2055 #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2056 static void gen_f##name(DisasContext *ctx) \
2058 if (unlikely(!ctx->fpu_enabled)) { \
2059 gen_exception(ctx, POWERPC_EXCP_FPU); \
2062 /* NIP cannot be restored if the memory exception comes from an helper */ \
2063 gen_update_nip(ctx, ctx->nip - 4); \
2064 gen_reset_fpstatus(); \
2065 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2066 cpu_fpr[rB(ctx->opcode)]); \
2067 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2068 set_fprf, Rc(ctx->opcode) != 0); \
2072 GEN_FLOAT_AB(add
, 0x15, 0x000007C0, 1, PPC_FLOAT
);
2074 GEN_FLOAT_AB(div
, 0x12, 0x000007C0, 1, PPC_FLOAT
);
2076 GEN_FLOAT_AC(mul
, 0x19, 0x0000F800, 1, PPC_FLOAT
);
2079 GEN_FLOAT_BS(re
, 0x3F, 0x18, 1, PPC_FLOAT_EXT
);
2082 GEN_FLOAT_BS(res
, 0x3B, 0x18, 1, PPC_FLOAT_FRES
);
2085 GEN_FLOAT_BS(rsqrte
, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE
);
2088 static void gen_frsqrtes(DisasContext
*ctx
)
2090 if (unlikely(!ctx
->fpu_enabled
)) {
2091 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2094 /* NIP cannot be restored if the memory exception comes from an helper */
2095 gen_update_nip(ctx
, ctx
->nip
- 4);
2096 gen_reset_fpstatus();
2097 gen_helper_frsqrte(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2098 cpu_fpr
[rB(ctx
->opcode
)]);
2099 gen_helper_frsp(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2100 cpu_fpr
[rD(ctx
->opcode
)]);
2101 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 1, Rc(ctx
->opcode
) != 0);
2105 _GEN_FLOAT_ACB(sel
, sel
, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL
);
2107 GEN_FLOAT_AB(sub
, 0x14, 0x000007C0, 1, PPC_FLOAT
);
2111 static void gen_fsqrt(DisasContext
*ctx
)
2113 if (unlikely(!ctx
->fpu_enabled
)) {
2114 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2117 /* NIP cannot be restored if the memory exception comes from an helper */
2118 gen_update_nip(ctx
, ctx
->nip
- 4);
2119 gen_reset_fpstatus();
2120 gen_helper_fsqrt(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2121 cpu_fpr
[rB(ctx
->opcode
)]);
2122 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 1, Rc(ctx
->opcode
) != 0);
2125 static void gen_fsqrts(DisasContext
*ctx
)
2127 if (unlikely(!ctx
->fpu_enabled
)) {
2128 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2131 /* NIP cannot be restored if the memory exception comes from an helper */
2132 gen_update_nip(ctx
, ctx
->nip
- 4);
2133 gen_reset_fpstatus();
2134 gen_helper_fsqrt(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2135 cpu_fpr
[rB(ctx
->opcode
)]);
2136 gen_helper_frsp(cpu_fpr
[rD(ctx
->opcode
)], cpu_env
,
2137 cpu_fpr
[rD(ctx
->opcode
)]);
2138 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 1, Rc(ctx
->opcode
) != 0);
2141 /*** Floating-Point multiply-and-add ***/
2142 /* fmadd - fmadds */
2143 GEN_FLOAT_ACB(madd
, 0x1D, 1, PPC_FLOAT
);
2144 /* fmsub - fmsubs */
2145 GEN_FLOAT_ACB(msub
, 0x1C, 1, PPC_FLOAT
);
2146 /* fnmadd - fnmadds */
2147 GEN_FLOAT_ACB(nmadd
, 0x1F, 1, PPC_FLOAT
);
2148 /* fnmsub - fnmsubs */
2149 GEN_FLOAT_ACB(nmsub
, 0x1E, 1, PPC_FLOAT
);
2151 /*** Floating-Point round & convert ***/
2153 GEN_FLOAT_B(ctiw
, 0x0E, 0x00, 0, PPC_FLOAT
);
2155 GEN_FLOAT_B(ctiwz
, 0x0F, 0x00, 0, PPC_FLOAT
);
2157 GEN_FLOAT_B(rsp
, 0x0C, 0x00, 1, PPC_FLOAT
);
2158 #if defined(TARGET_PPC64)
2160 GEN_FLOAT_B(cfid
, 0x0E, 0x1A, 1, PPC_64B
);
2162 GEN_FLOAT_B(ctid
, 0x0E, 0x19, 0, PPC_64B
);
2164 GEN_FLOAT_B(ctidz
, 0x0F, 0x19, 0, PPC_64B
);
2168 GEN_FLOAT_B(rin
, 0x08, 0x0C, 1, PPC_FLOAT_EXT
);
2170 GEN_FLOAT_B(riz
, 0x08, 0x0D, 1, PPC_FLOAT_EXT
);
2172 GEN_FLOAT_B(rip
, 0x08, 0x0E, 1, PPC_FLOAT_EXT
);
2174 GEN_FLOAT_B(rim
, 0x08, 0x0F, 1, PPC_FLOAT_EXT
);
2176 /*** Floating-Point compare ***/
2179 static void gen_fcmpo(DisasContext
*ctx
)
2182 if (unlikely(!ctx
->fpu_enabled
)) {
2183 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2186 /* NIP cannot be restored if the memory exception comes from an helper */
2187 gen_update_nip(ctx
, ctx
->nip
- 4);
2188 gen_reset_fpstatus();
2189 crf
= tcg_const_i32(crfD(ctx
->opcode
));
2190 gen_helper_fcmpo(cpu_env
, cpu_fpr
[rA(ctx
->opcode
)],
2191 cpu_fpr
[rB(ctx
->opcode
)], crf
);
2192 tcg_temp_free_i32(crf
);
2193 gen_helper_float_check_status(cpu_env
);
2197 static void gen_fcmpu(DisasContext
*ctx
)
2200 if (unlikely(!ctx
->fpu_enabled
)) {
2201 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2204 /* NIP cannot be restored if the memory exception comes from an helper */
2205 gen_update_nip(ctx
, ctx
->nip
- 4);
2206 gen_reset_fpstatus();
2207 crf
= tcg_const_i32(crfD(ctx
->opcode
));
2208 gen_helper_fcmpu(cpu_env
, cpu_fpr
[rA(ctx
->opcode
)],
2209 cpu_fpr
[rB(ctx
->opcode
)], crf
);
2210 tcg_temp_free_i32(crf
);
2211 gen_helper_float_check_status(cpu_env
);
2214 /*** Floating-point move ***/
2216 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
2217 static void gen_fabs(DisasContext
*ctx
)
2219 if (unlikely(!ctx
->fpu_enabled
)) {
2220 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2223 tcg_gen_andi_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)],
2225 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 0, Rc(ctx
->opcode
) != 0);
2229 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
2230 static void gen_fmr(DisasContext
*ctx
)
2232 if (unlikely(!ctx
->fpu_enabled
)) {
2233 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2236 tcg_gen_mov_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)]);
2237 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 0, Rc(ctx
->opcode
) != 0);
2241 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
2242 static void gen_fnabs(DisasContext
*ctx
)
2244 if (unlikely(!ctx
->fpu_enabled
)) {
2245 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2248 tcg_gen_ori_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)],
2250 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 0, Rc(ctx
->opcode
) != 0);
2254 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
2255 static void gen_fneg(DisasContext
*ctx
)
2257 if (unlikely(!ctx
->fpu_enabled
)) {
2258 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2261 tcg_gen_xori_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rB(ctx
->opcode
)],
2263 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 0, Rc(ctx
->opcode
) != 0);
2266 /* fcpsgn: PowerPC 2.05 specification */
2267 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
2268 static void gen_fcpsgn(DisasContext
*ctx
)
2270 if (unlikely(!ctx
->fpu_enabled
)) {
2271 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2274 tcg_gen_deposit_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpr
[rA(ctx
->opcode
)],
2275 cpu_fpr
[rB(ctx
->opcode
)], 0, 63);
2276 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 0, Rc(ctx
->opcode
) != 0);
2279 /*** Floating-Point status & ctrl register ***/
2282 static void gen_mcrfs(DisasContext
*ctx
)
2284 TCGv tmp
= tcg_temp_new();
2287 if (unlikely(!ctx
->fpu_enabled
)) {
2288 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2291 bfa
= 4 * (7 - crfS(ctx
->opcode
));
2292 tcg_gen_shri_tl(tmp
, cpu_fpscr
, bfa
);
2293 tcg_gen_trunc_tl_i32(cpu_crf
[crfD(ctx
->opcode
)], tmp
);
2295 tcg_gen_andi_i32(cpu_crf
[crfD(ctx
->opcode
)], cpu_crf
[crfD(ctx
->opcode
)], 0xf);
2296 tcg_gen_andi_tl(cpu_fpscr
, cpu_fpscr
, ~(0xF << bfa
));
2300 static void gen_mffs(DisasContext
*ctx
)
2302 if (unlikely(!ctx
->fpu_enabled
)) {
2303 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2306 gen_reset_fpstatus();
2307 tcg_gen_extu_tl_i64(cpu_fpr
[rD(ctx
->opcode
)], cpu_fpscr
);
2308 gen_compute_fprf(cpu_fpr
[rD(ctx
->opcode
)], 0, Rc(ctx
->opcode
) != 0);
2312 static void gen_mtfsb0(DisasContext
*ctx
)
2316 if (unlikely(!ctx
->fpu_enabled
)) {
2317 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2320 crb
= 31 - crbD(ctx
->opcode
);
2321 gen_reset_fpstatus();
2322 if (likely(crb
!= FPSCR_FEX
&& crb
!= FPSCR_VX
)) {
2324 /* NIP cannot be restored if the memory exception comes from an helper */
2325 gen_update_nip(ctx
, ctx
->nip
- 4);
2326 t0
= tcg_const_i32(crb
);
2327 gen_helper_fpscr_clrbit(cpu_env
, t0
);
2328 tcg_temp_free_i32(t0
);
2330 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2331 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2332 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2337 static void gen_mtfsb1(DisasContext
*ctx
)
2341 if (unlikely(!ctx
->fpu_enabled
)) {
2342 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2345 crb
= 31 - crbD(ctx
->opcode
);
2346 gen_reset_fpstatus();
2347 /* XXX: we pretend we can only do IEEE floating-point computations */
2348 if (likely(crb
!= FPSCR_FEX
&& crb
!= FPSCR_VX
&& crb
!= FPSCR_NI
)) {
2350 /* NIP cannot be restored if the memory exception comes from an helper */
2351 gen_update_nip(ctx
, ctx
->nip
- 4);
2352 t0
= tcg_const_i32(crb
);
2353 gen_helper_fpscr_setbit(cpu_env
, t0
);
2354 tcg_temp_free_i32(t0
);
2356 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2357 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2358 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2360 /* We can raise a differed exception */
2361 gen_helper_float_check_status(cpu_env
);
2365 static void gen_mtfsf(DisasContext
*ctx
)
2370 if (unlikely(!ctx
->fpu_enabled
)) {
2371 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2374 flm
= FPFLM(ctx
->opcode
);
2375 l
= FPL(ctx
->opcode
);
2376 w
= FPW(ctx
->opcode
);
2377 if (unlikely(w
& !(ctx
->insns_flags2
& PPC2_ISA205
))) {
2378 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2381 /* NIP cannot be restored if the memory exception comes from an helper */
2382 gen_update_nip(ctx
, ctx
->nip
- 4);
2383 gen_reset_fpstatus();
2385 t0
= tcg_const_i32((ctx
->insns_flags2
& PPC2_ISA205
) ?
0xffff : 0xff);
2387 t0
= tcg_const_i32(flm
<< (w
* 8));
2389 gen_helper_store_fpscr(cpu_env
, cpu_fpr
[rB(ctx
->opcode
)], t0
);
2390 tcg_temp_free_i32(t0
);
2391 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2392 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2393 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2395 /* We can raise a differed exception */
2396 gen_helper_float_check_status(cpu_env
);
2400 static void gen_mtfsfi(DisasContext
*ctx
)
2406 if (unlikely(!ctx
->fpu_enabled
)) {
2407 gen_exception(ctx
, POWERPC_EXCP_FPU
);
2410 w
= FPW(ctx
->opcode
);
2411 bf
= FPBF(ctx
->opcode
);
2412 if (unlikely(w
& !(ctx
->insns_flags2
& PPC2_ISA205
))) {
2413 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2416 sh
= (8 * w
) + 7 - bf
;
2417 /* NIP cannot be restored if the memory exception comes from an helper */
2418 gen_update_nip(ctx
, ctx
->nip
- 4);
2419 gen_reset_fpstatus();
2420 t0
= tcg_const_i64(((uint64_t)FPIMM(ctx
->opcode
)) << (4 * sh
));
2421 t1
= tcg_const_i32(1 << sh
);
2422 gen_helper_store_fpscr(cpu_env
, t0
, t1
);
2423 tcg_temp_free_i64(t0
);
2424 tcg_temp_free_i32(t1
);
2425 if (unlikely(Rc(ctx
->opcode
) != 0)) {
2426 tcg_gen_trunc_tl_i32(cpu_crf
[1], cpu_fpscr
);
2427 tcg_gen_shri_i32(cpu_crf
[1], cpu_crf
[1], FPSCR_OX
);
2429 /* We can raise a differed exception */
2430 gen_helper_float_check_status(cpu_env
);
2433 /*** Addressing modes ***/
2434 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2435 static inline void gen_addr_imm_index(DisasContext
*ctx
, TCGv EA
,
2438 target_long simm
= SIMM(ctx
->opcode
);
2441 if (rA(ctx
->opcode
) == 0) {
2442 if (NARROW_MODE(ctx
)) {
2443 simm
= (uint32_t)simm
;
2445 tcg_gen_movi_tl(EA
, simm
);
2446 } else if (likely(simm
!= 0)) {
2447 tcg_gen_addi_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)], simm
);
2448 if (NARROW_MODE(ctx
)) {
2449 tcg_gen_ext32u_tl(EA
, EA
);
2452 if (NARROW_MODE(ctx
)) {
2453 tcg_gen_ext32u_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2455 tcg_gen_mov_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2460 static inline void gen_addr_reg_index(DisasContext
*ctx
, TCGv EA
)
2462 if (rA(ctx
->opcode
) == 0) {
2463 if (NARROW_MODE(ctx
)) {
2464 tcg_gen_ext32u_tl(EA
, cpu_gpr
[rB(ctx
->opcode
)]);
2466 tcg_gen_mov_tl(EA
, cpu_gpr
[rB(ctx
->opcode
)]);
2469 tcg_gen_add_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rB(ctx
->opcode
)]);
2470 if (NARROW_MODE(ctx
)) {
2471 tcg_gen_ext32u_tl(EA
, EA
);
2476 static inline void gen_addr_register(DisasContext
*ctx
, TCGv EA
)
2478 if (rA(ctx
->opcode
) == 0) {
2479 tcg_gen_movi_tl(EA
, 0);
2480 } else if (NARROW_MODE(ctx
)) {
2481 tcg_gen_ext32u_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2483 tcg_gen_mov_tl(EA
, cpu_gpr
[rA(ctx
->opcode
)]);
2487 static inline void gen_addr_add(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
2490 tcg_gen_addi_tl(ret
, arg1
, val
);
2491 if (NARROW_MODE(ctx
)) {
2492 tcg_gen_ext32u_tl(ret
, ret
);
2496 static inline void gen_check_align(DisasContext
*ctx
, TCGv EA
, int mask
)
2498 int l1
= gen_new_label();
2499 TCGv t0
= tcg_temp_new();
2501 /* NIP cannot be restored if the memory exception comes from an helper */
2502 gen_update_nip(ctx
, ctx
->nip
- 4);
2503 tcg_gen_andi_tl(t0
, EA
, mask
);
2504 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, l1
);
2505 t1
= tcg_const_i32(POWERPC_EXCP_ALIGN
);
2506 t2
= tcg_const_i32(0);
2507 gen_helper_raise_exception_err(cpu_env
, t1
, t2
);
2508 tcg_temp_free_i32(t1
);
2509 tcg_temp_free_i32(t2
);
2514 /*** Integer load ***/
2515 static inline void gen_qemu_ld8u(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2517 tcg_gen_qemu_ld8u(arg1
, arg2
, ctx
->mem_idx
);
2520 static inline void gen_qemu_ld8s(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2522 tcg_gen_qemu_ld8s(arg1
, arg2
, ctx
->mem_idx
);
2525 static inline void gen_qemu_ld16u(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2527 tcg_gen_qemu_ld16u(arg1
, arg2
, ctx
->mem_idx
);
2528 if (unlikely(ctx
->le_mode
)) {
2529 tcg_gen_bswap16_tl(arg1
, arg1
);
2533 static inline void gen_qemu_ld16s(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2535 if (unlikely(ctx
->le_mode
)) {
2536 tcg_gen_qemu_ld16u(arg1
, arg2
, ctx
->mem_idx
);
2537 tcg_gen_bswap16_tl(arg1
, arg1
);
2538 tcg_gen_ext16s_tl(arg1
, arg1
);
2540 tcg_gen_qemu_ld16s(arg1
, arg2
, ctx
->mem_idx
);
2544 static inline void gen_qemu_ld32u(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2546 tcg_gen_qemu_ld32u(arg1
, arg2
, ctx
->mem_idx
);
2547 if (unlikely(ctx
->le_mode
)) {
2548 tcg_gen_bswap32_tl(arg1
, arg1
);
2552 static inline void gen_qemu_ld32s(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2554 if (unlikely(ctx
->le_mode
)) {
2555 tcg_gen_qemu_ld32u(arg1
, arg2
, ctx
->mem_idx
);
2556 tcg_gen_bswap32_tl(arg1
, arg1
);
2557 tcg_gen_ext32s_tl(arg1
, arg1
);
2559 tcg_gen_qemu_ld32s(arg1
, arg2
, ctx
->mem_idx
);
2562 static inline void gen_qemu_ld64(DisasContext
*ctx
, TCGv_i64 arg1
, TCGv arg2
)
2564 tcg_gen_qemu_ld64(arg1
, arg2
, ctx
->mem_idx
);
2565 if (unlikely(ctx
->le_mode
)) {
2566 tcg_gen_bswap64_i64(arg1
, arg1
);
2570 static inline void gen_qemu_st8(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2572 tcg_gen_qemu_st8(arg1
, arg2
, ctx
->mem_idx
);
2575 static inline void gen_qemu_st16(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2577 if (unlikely(ctx
->le_mode
)) {
2578 TCGv t0
= tcg_temp_new();
2579 tcg_gen_ext16u_tl(t0
, arg1
);
2580 tcg_gen_bswap16_tl(t0
, t0
);
2581 tcg_gen_qemu_st16(t0
, arg2
, ctx
->mem_idx
);
2584 tcg_gen_qemu_st16(arg1
, arg2
, ctx
->mem_idx
);
2588 static inline void gen_qemu_st32(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2590 if (unlikely(ctx
->le_mode
)) {
2591 TCGv t0
= tcg_temp_new();
2592 tcg_gen_ext32u_tl(t0
, arg1
);
2593 tcg_gen_bswap32_tl(t0
, t0
);
2594 tcg_gen_qemu_st32(t0
, arg2
, ctx
->mem_idx
);
2597 tcg_gen_qemu_st32(arg1
, arg2
, ctx
->mem_idx
);
2601 static inline void gen_qemu_st64(DisasContext
*ctx
, TCGv_i64 arg1
, TCGv arg2
)
2603 if (unlikely(ctx
->le_mode
)) {
2604 TCGv_i64 t0
= tcg_temp_new_i64();
2605 tcg_gen_bswap64_i64(t0
, arg1
);
2606 tcg_gen_qemu_st64(t0
, arg2
, ctx
->mem_idx
);
2607 tcg_temp_free_i64(t0
);
2609 tcg_gen_qemu_st64(arg1
, arg2
, ctx
->mem_idx
);
/* Template macros for the integer load instruction family:
 *   GEN_LD    - D-form load (rD <- MEM[(rA|0) + SIMM])
 *   GEN_LDU   - D-form load with update (rA <- EA; rA==0 or rA==rD invalid)
 *   GEN_LDUX  - X-form load with update
 *   GEN_LDX_E - X-form load (with extra insns_flags2 type)
 *   GEN_LDS   - instantiate the whole lz/lzu/lzux/lzx quartet */
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)

#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2679 /* lbz lbzu lbzux lbzx */
2680 GEN_LDS(lbz
, ld8u
, 0x02, PPC_INTEGER
);
2681 /* lha lhau lhaux lhax */
2682 GEN_LDS(lha
, ld16s
, 0x0A, PPC_INTEGER
);
2683 /* lhz lhzu lhzux lhzx */
2684 GEN_LDS(lhz
, ld16u
, 0x08, PPC_INTEGER
);
2685 /* lwz lwzu lwzux lwzx */
2686 GEN_LDS(lwz
, ld32u
, 0x00, PPC_INTEGER
);
2687 #if defined(TARGET_PPC64)
2689 GEN_LDUX(lwa
, ld32s
, 0x15, 0x0B, PPC_64B
);
2691 GEN_LDX(lwa
, ld32s
, 0x15, 0x0A, PPC_64B
);
2693 GEN_LDUX(ld
, ld64
, 0x15, 0x01, PPC_64B
);
2695 GEN_LDX(ld
, ld64
, 0x15, 0x00, PPC_64B
);
2697 static void gen_ld(DisasContext
*ctx
)
2700 if (Rc(ctx
->opcode
)) {
2701 if (unlikely(rA(ctx
->opcode
) == 0 ||
2702 rA(ctx
->opcode
) == rD(ctx
->opcode
))) {
2703 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2707 gen_set_access_type(ctx
, ACCESS_INT
);
2708 EA
= tcg_temp_new();
2709 gen_addr_imm_index(ctx
, EA
, 0x03);
2710 if (ctx
->opcode
& 0x02) {
2711 /* lwa (lwau is undefined) */
2712 gen_qemu_ld32s(ctx
, cpu_gpr
[rD(ctx
->opcode
)], EA
);
2715 gen_qemu_ld64(ctx
, cpu_gpr
[rD(ctx
->opcode
)], EA
);
2717 if (Rc(ctx
->opcode
))
2718 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], EA
);
2723 static void gen_lq(DisasContext
*ctx
)
2725 #if defined(CONFIG_USER_ONLY)
2726 gen_inval_exception(ctx
, POWERPC_EXCP_PRIV_OPC
);
2731 /* Restore CPU state */
2732 if (unlikely(ctx
->mem_idx
== 0)) {
2733 gen_inval_exception(ctx
, POWERPC_EXCP_PRIV_OPC
);
2736 ra
= rA(ctx
->opcode
);
2737 rd
= rD(ctx
->opcode
);
2738 if (unlikely((rd
& 1) || rd
== ra
)) {
2739 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2742 if (unlikely(ctx
->le_mode
)) {
2743 /* Little-endian mode is not handled */
2744 gen_exception_err(ctx
, POWERPC_EXCP_ALIGN
, POWERPC_EXCP_ALIGN_LE
);
2747 gen_set_access_type(ctx
, ACCESS_INT
);
2748 EA
= tcg_temp_new();
2749 gen_addr_imm_index(ctx
, EA
, 0x0F);
2750 gen_qemu_ld64(ctx
, cpu_gpr
[rd
], EA
);
2751 gen_addr_add(ctx
, EA
, EA
, 8);
2752 gen_qemu_ld64(ctx
, cpu_gpr
[rd
+1], EA
);
/*** Integer store ***/
/* Store templates mirroring the load templates above. NOTE(review): GEN_STU
 * deliberately names the function after the memory op (`stop##u`, e.g.
 * gen_st8u), not the mnemonic — this matches how the opcode tables refer to
 * the update forms; confirm against the table entries before renaming. */
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX_E(name, stop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)

#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
2824 /* stb stbu stbux stbx */
2825 GEN_STS(stb
, st8
, 0x06, PPC_INTEGER
);
2826 /* sth sthu sthux sthx */
2827 GEN_STS(sth
, st16
, 0x0C, PPC_INTEGER
);
2828 /* stw stwu stwux stwx */
2829 GEN_STS(stw
, st32
, 0x04, PPC_INTEGER
);
2830 #if defined(TARGET_PPC64)
2831 GEN_STUX(std
, st64
, 0x15, 0x05, PPC_64B
);
2832 GEN_STX(std
, st64
, 0x15, 0x04, PPC_64B
);
2834 static void gen_std(DisasContext
*ctx
)
2839 rs
= rS(ctx
->opcode
);
2840 if ((ctx
->opcode
& 0x3) == 0x2) {
2841 #if defined(CONFIG_USER_ONLY)
2842 gen_inval_exception(ctx
, POWERPC_EXCP_PRIV_OPC
);
2845 if (unlikely(ctx
->mem_idx
== 0)) {
2846 gen_inval_exception(ctx
, POWERPC_EXCP_PRIV_OPC
);
2849 if (unlikely(rs
& 1)) {
2850 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2853 if (unlikely(ctx
->le_mode
)) {
2854 /* Little-endian mode is not handled */
2855 gen_exception_err(ctx
, POWERPC_EXCP_ALIGN
, POWERPC_EXCP_ALIGN_LE
);
2858 gen_set_access_type(ctx
, ACCESS_INT
);
2859 EA
= tcg_temp_new();
2860 gen_addr_imm_index(ctx
, EA
, 0x03);
2861 gen_qemu_st64(ctx
, cpu_gpr
[rs
], EA
);
2862 gen_addr_add(ctx
, EA
, EA
, 8);
2863 gen_qemu_st64(ctx
, cpu_gpr
[rs
+1], EA
);
2868 if (Rc(ctx
->opcode
)) {
2869 if (unlikely(rA(ctx
->opcode
) == 0)) {
2870 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_INVAL
);
2874 gen_set_access_type(ctx
, ACCESS_INT
);
2875 EA
= tcg_temp_new();
2876 gen_addr_imm_index(ctx
, EA
, 0x03);
2877 gen_qemu_st64(ctx
, cpu_gpr
[rs
], EA
);
2878 if (Rc(ctx
->opcode
))
2879 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], EA
);
2884 /*** Integer load and store with byte reverse ***/
2886 static inline void gen_qemu_ld16ur(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2888 tcg_gen_qemu_ld16u(arg1
, arg2
, ctx
->mem_idx
);
2889 if (likely(!ctx
->le_mode
)) {
2890 tcg_gen_bswap16_tl(arg1
, arg1
);
2893 GEN_LDX(lhbr
, ld16ur
, 0x16, 0x18, PPC_INTEGER
);
2896 static inline void gen_qemu_ld32ur(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2898 tcg_gen_qemu_ld32u(arg1
, arg2
, ctx
->mem_idx
);
2899 if (likely(!ctx
->le_mode
)) {
2900 tcg_gen_bswap32_tl(arg1
, arg1
);
2903 GEN_LDX(lwbr
, ld32ur
, 0x16, 0x10, PPC_INTEGER
);
2905 #if defined(TARGET_PPC64)
2907 static inline void gen_qemu_ld64ur(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2909 tcg_gen_qemu_ld64(arg1
, arg2
, ctx
->mem_idx
);
2910 if (likely(!ctx
->le_mode
)) {
2911 tcg_gen_bswap64_tl(arg1
, arg1
);
2914 GEN_LDX_E(ldbr
, ld64ur
, 0x14, 0x10, PPC_NONE
, PPC2_DBRX
);
2915 #endif /* TARGET_PPC64 */
2918 static inline void gen_qemu_st16r(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2920 if (likely(!ctx
->le_mode
)) {
2921 TCGv t0
= tcg_temp_new();
2922 tcg_gen_ext16u_tl(t0
, arg1
);
2923 tcg_gen_bswap16_tl(t0
, t0
);
2924 tcg_gen_qemu_st16(t0
, arg2
, ctx
->mem_idx
);
2927 tcg_gen_qemu_st16(arg1
, arg2
, ctx
->mem_idx
);
2930 GEN_STX(sthbr
, st16r
, 0x16, 0x1C, PPC_INTEGER
);
2933 static inline void gen_qemu_st32r(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2935 if (likely(!ctx
->le_mode
)) {
2936 TCGv t0
= tcg_temp_new();
2937 tcg_gen_ext32u_tl(t0
, arg1
);
2938 tcg_gen_bswap32_tl(t0
, t0
);
2939 tcg_gen_qemu_st32(t0
, arg2
, ctx
->mem_idx
);
2942 tcg_gen_qemu_st32(arg1
, arg2
, ctx
->mem_idx
);
2945 GEN_STX(stwbr
, st32r
, 0x16, 0x14, PPC_INTEGER
);
2947 #if defined(TARGET_PPC64)
2949 static inline void gen_qemu_st64r(DisasContext
*ctx
, TCGv arg1
, TCGv arg2
)
2951 if (likely(!ctx
->le_mode
)) {
2952 TCGv t0
= tcg_temp_new();
2953 tcg_gen_bswap64_tl(t0
, arg1
);
2954 tcg_gen_qemu_st64(t0
, arg2
, ctx
->mem_idx
);
2957 tcg_gen_qemu_st64(arg1
, arg2
, ctx
->mem_idx
);
2960 GEN_STX_E(stdbr
, st64r
, 0x14, 0x14, PPC_NONE
, PPC2_DBRX
);
2961 #endif /* TARGET_PPC64 */
2963 /*** Integer load and store multiple ***/
2966 static void gen_lmw(DisasContext
*ctx
)
2970 gen_set_access_type(ctx
, ACCESS_INT
);
2971 /* NIP cannot be restored if the memory exception comes from an helper */
2972 gen_update_nip(ctx
, ctx
->nip
- 4);
2973 t0
= tcg_temp_new();
2974 t1
= tcg_const_i32(rD(ctx
->opcode
));
2975 gen_addr_imm_index(ctx
, t0
, 0);
2976 gen_helper_lmw(cpu_env
, t0
, t1
);
2978 tcg_temp_free_i32(t1
);
2982 static void gen_stmw(DisasContext
*ctx
)
2986 gen_set_access_type(ctx
, ACCESS_INT
);
2987 /* NIP cannot be restored if the memory exception comes from an helper */
2988 gen_update_nip(ctx
, ctx
->nip
- 4);
2989 t0
= tcg_temp_new();
2990 t1
= tcg_const_i32(rS(ctx
->opcode
));
2991 gen_addr_imm_index(ctx
, t0
, 0);
2992 gen_helper_stmw(cpu_env
, t0
, t1
);
2994 tcg_temp_free_i32(t1
);
2997 /*** Integer load and store strings ***/
3000 /* PowerPC32 specification says we must generate an exception if
3001 * rA is in the range of registers to be loaded.
3002 * In an other hand, IBM says this is valid, but rA won't be loaded.
3003 * For now, I'll follow the spec...
3005 static void gen_lswi(DisasContext
*ctx
)
3009 int nb
= NB(ctx
->opcode
);
3010 int start
= rD(ctx
->opcode
);
3011 int ra
= rA(ctx
->opcode
);
3017 if (unlikely(((start
+ nr
) > 32 &&
3018 start
<= ra
&& (start
+ nr
- 32) > ra
) ||
3019 ((start
+ nr
) <= 32 && start
<= ra
&& (start
+ nr
) > ra
))) {
3020 gen_inval_exception(ctx
, POWERPC_EXCP_INVAL_LSWX
);
3023 gen_set_access_type(ctx
, ACCESS_INT
);
3024 /* NIP cannot be restored if the memory exception comes from an helper */
3025 gen_update_nip(ctx
, ctx
->nip
- 4);
3026 t0
= tcg_temp_new();
3027 gen_addr_register(ctx
, t0
);
3028 t1
= tcg_const_i32(nb
);
3029 t2
= tcg_const_i32(start
);
3030 gen_helper_lsw(cpu_env
, t0
, t1
, t2
);
3032 tcg_temp_free_i32(t1
);
3033 tcg_temp_free_i32(t2
);
3037 static void gen_lswx(DisasContext
*ctx
)
3040 TCGv_i32 t1
, t2
, t3
;
3041 gen_set_access_type(ctx
, ACCESS_INT
);
3042 /* NIP cannot be restored if the memory exception comes from an helper */
3043 gen_update_nip(ctx
, ctx
->nip
- 4);
3044 t0
= tcg_temp_new();
3045 gen_addr_reg_index(ctx
, t0
);
3046 t1
= tcg_const_i32(rD(ctx
->opcode
));
3047 t2
= tcg_const_i32(rA(ctx
->opcode
));
3048 t3
= tcg_const_i32(rB(ctx
->opcode
));
3049 gen_helper_lswx(cpu_env
, t0
, t1
, t2
, t3
);
3051 tcg_temp_free_i32(t1
);
3052 tcg_temp_free_i32(t2
);
3053 tcg_temp_free_i32(t3
);
3057 static void gen_stswi(DisasContext
*ctx
)
3061 int nb
= NB(ctx
->opcode
);
3062 gen_set_access_type(ctx
, ACCESS_INT
);
3063 /* NIP cannot be restored if the memory exception comes from an helper */
3064 gen_update_nip(ctx
, ctx
->nip
- 4);
3065 t0
= tcg_temp_new();
3066 gen_addr_register(ctx
, t0
);
3069 t1
= tcg_const_i32(nb
);
3070 t2
= tcg_const_i32(rS(ctx
->opcode
));
3071 gen_helper_stsw(cpu_env
, t0
, t1
, t2
);
3073 tcg_temp_free_i32(t1
);
3074 tcg_temp_free_i32(t2
);
3078 static void gen_stswx(DisasContext
*ctx
)
3082 gen_set_access_type(ctx
, ACCESS_INT
);
3083 /* NIP cannot be restored if the memory exception comes from an helper */
3084 gen_update_nip(ctx
, ctx
->nip
- 4);
3085 t0
= tcg_temp_new();
3086 gen_addr_reg_index(ctx
, t0
);
3087 t1
= tcg_temp_new_i32();
3088 tcg_gen_trunc_tl_i32(t1
, cpu_xer
);
3089 tcg_gen_andi_i32(t1
, t1
, 0x7F);
3090 t2
= tcg_const_i32(rS(ctx
->opcode
));
3091 gen_helper_stsw(cpu_env
, t0
, t1
, t2
);
3093 tcg_temp_free_i32(t1
);
3094 tcg_temp_free_i32(t2
);
3097 /*** Memory synchronisation ***/
3099 static void gen_eieio(DisasContext
*ctx
)
3104 static void gen_isync(DisasContext
*ctx
)
3106 gen_stop_exception(ctx
);
3110 static void gen_lwarx(DisasContext
*ctx
)
3113 TCGv gpr
= cpu_gpr
[rD(ctx
->opcode
)];
3114 gen_set_access_type(ctx
, ACCESS_RES
);
3115 t0
= tcg_temp_local_new();
3116 gen_addr_reg_index(ctx
, t0
);
3117 gen_check_align(ctx
, t0
, 0x03);
3118 gen_qemu_ld32u(ctx
, gpr
, t0
);
3119 tcg_gen_mov_tl(cpu_reserve
, t0
);
3120 tcg_gen_st_tl(gpr
, cpu_env
, offsetof(CPUPPCState
, reserve_val
));
#if defined(CONFIG_USER_ONLY)
/* User-mode st[wd]cx.: record the reservation parameters and trap out so
 * the store-conditional can be completed atomically outside TCG */
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
                                  int reg, int size)
{
    TCGv t0 = tcg_temp_new();
    uint32_t save_exception = ctx->exception;

    tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
    /* Pack the store size and source register for the trap handler */
    tcg_gen_movi_tl(t0, (size << 5) | reg);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
    tcg_temp_free(t0);
    gen_update_nip(ctx, ctx->nip - 4);
    /* Pretend this is a branch so the exception path keeps NIP */
    ctx->exception = POWERPC_EXCP_BRANCH;
    gen_exception(ctx, POWERPC_EXCP_STCX);
    ctx->exception = save_exception;
}
#endif
3143 static void gen_stwcx_(DisasContext
*ctx
)
3146 gen_set_access_type(ctx
, ACCESS_RES
);
3147 t0
= tcg_temp_local_new();
3148 gen_addr_reg_index(ctx
, t0
);
3149 gen_check_align(ctx
, t0
, 0x03);
3150 #if defined(CONFIG_USER_ONLY)
3151 gen_conditional_store(ctx
, t0
, rS(ctx
->opcode
), 4);
3156 tcg_gen_trunc_tl_i32(cpu_crf
[0], cpu_so
);
3157 l1
= gen_new_label();
3158 tcg_gen_brcond_tl(TCG_COND_NE
, t0
, cpu_reserve
, l1
);
3159 tcg_gen_ori_i32(cpu_crf
[0], cpu_crf
[0], 1 << CRF_EQ
);
3160 gen_qemu_st32(ctx
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
3162 tcg_gen_movi_tl(cpu_reserve
, -1);
3168 #if defined(TARGET_PPC64)
3170 static void gen_ldarx(DisasContext
*ctx
)
3173 TCGv gpr
= cpu_gpr
[rD(ctx
->opcode
)];
3174 gen_set_access_type(ctx
, ACCESS_RES
);
3175 t0
= tcg_temp_local_new();
3176 gen_addr_reg_index(ctx
, t0
);
3177 gen_check_align(ctx
, t0
, 0x07);
3178 gen_qemu_ld64(ctx
, gpr
, t0
);
3179 tcg_gen_mov_tl(cpu_reserve
, t0
);
3180 tcg_gen_st_tl(gpr
, cpu_env
, offsetof(CPUPPCState
, reserve_val
));
3185 static void gen_stdcx_(DisasContext
*ctx
)
3188 gen_set_access_type(ctx
, ACCESS_RES
);
3189 t0
= tcg_temp_local_new();
3190 gen_addr_reg_index(ctx
, t0
);
3191 gen_check_align(ctx
, t0
, 0x07);
3192 #if defined(CONFIG_USER_ONLY)
3193 gen_conditional_store(ctx
, t0
, rS(ctx
->opcode
), 8);
3197 tcg_gen_trunc_tl_i32(cpu_crf
[0], cpu_so
);
3198 l1
= gen_new_label();
3199 tcg_gen_brcond_tl(TCG_COND_NE
, t0
, cpu_reserve
, l1
);
3200 tcg_gen_ori_i32(cpu_crf
[0], cpu_crf
[0], 1 << CRF_EQ
);
3201 gen_qemu_st64(ctx
, cpu_gpr
[rS(ctx
->opcode
)], t0
);
3203 tcg_gen_movi_tl(cpu_reserve
, -1);
3208 #endif /* defined(TARGET_PPC64) */
3211 static void gen_sync(DisasContext
*ctx
)
3216 static void gen_wait(DisasContext
*ctx
)
3218 TCGv_i32 t0
= tcg_temp_new_i32();
3219 tcg_gen_st_i32(t0
, cpu_env
,
3220 -offsetof(PowerPCCPU
, env
) + offsetof(CPUState
, halted
));
3221 tcg_temp_free_i32(t0
);
3222 /* Stop translation, as the CPU is supposed to sleep from now */
3223 gen_exception_err(ctx
, EXCP_HLT
, 1);
/*** Floating-point load ***/
/* FP load templates; each variant first checks MSR[FP] (fpu_enabled).
 *   GEN_LDF   - D-form;  GEN_LDUF  - D-form with update (rA != 0)
 *   GEN_LDUXF - X-form with update;  GEN_LDXF - X-form
 *   GEN_LDFS  - instantiate all four */
#define GEN_LDF(name, ldop, opc, type)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUF(name, ldop, opc, type)                                       \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUXF(name, ldop, opc, type)                                      \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDXF(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDFS(name, ldop, op, type)                                        \
GEN_LDF(name, ldop, op | 0x20, type);                                         \
GEN_LDUF(name, ldop, op | 0x21, type);                                        \
GEN_LDUXF(name, ldop, op | 0x01, type);                                       \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
3303 static inline void gen_qemu_ld32fs(DisasContext
*ctx
, TCGv_i64 arg1
, TCGv arg2
)
3305 TCGv t0
= tcg_temp_new();
3306 TCGv_i32 t1
= tcg_temp_new_i32();
3307 gen_qemu_ld32u(ctx
, t0
, arg2
);
3308 tcg_gen_trunc_tl_i32(t1
, t0
);
3310 gen_helper_float32_to_float64(arg1
, cpu_env
, t1
);
3311 tcg_temp_free_i32(t1
);
3314 /* lfd lfdu lfdux lfdx */
3315 GEN_LDFS(lfd
, ld64
, 0x12, PPC_FLOAT
);
3316 /* lfs lfsu lfsux lfsx */
3317 GEN_LDFS(lfs
, ld32fs
, 0x10, PPC_FLOAT
);
3320 static void gen_lfdp(DisasContext
*ctx
)
3323 if (unlikely(!ctx
->fpu_enabled
)) {
3324 gen_exception(ctx
, POWERPC_EXCP_FPU
);
3327 gen_set_access_type(ctx
, ACCESS_FLOAT
);
3328 EA
= tcg_temp_new();
3329 gen_addr_imm_index(ctx
, EA
, 0); \
3330 if (unlikely(ctx
->le_mode
)) {
3331 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
) + 1], EA
);
3332 tcg_gen_addi_tl(EA
, EA
, 8);
3333 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
)], EA
);
3335 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
)], EA
);
3336 tcg_gen_addi_tl(EA
, EA
, 8);
3337 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
) + 1], EA
);
3343 static void gen_lfdpx(DisasContext
*ctx
)
3346 if (unlikely(!ctx
->fpu_enabled
)) {
3347 gen_exception(ctx
, POWERPC_EXCP_FPU
);
3350 gen_set_access_type(ctx
, ACCESS_FLOAT
);
3351 EA
= tcg_temp_new();
3352 gen_addr_reg_index(ctx
, EA
);
3353 if (unlikely(ctx
->le_mode
)) {
3354 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
) + 1], EA
);
3355 tcg_gen_addi_tl(EA
, EA
, 8);
3356 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
)], EA
);
3358 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
)], EA
);
3359 tcg_gen_addi_tl(EA
, EA
, 8);
3360 gen_qemu_ld64(ctx
, cpu_fpr
[rD(ctx
->opcode
) + 1], EA
);
3366 static void gen_lfiwax(DisasContext
*ctx
)
3370 if (unlikely(!ctx
->fpu_enabled
)) {
3371 gen_exception(ctx
, POWERPC_EXCP_FPU
);
3374 gen_set_access_type(ctx
, ACCESS_FLOAT
);
3375 EA
= tcg_temp_new();
3376 t0
= tcg_temp_new();
3377 gen_addr_reg_index(ctx
, EA
);
3378 gen_qemu_ld32s(ctx
, t0
, EA
);
3379 tcg_gen_ext_tl_i64(cpu_fpr
[rD(ctx
->opcode
)], t0
);
/*** Floating-point store ***/
/* FP store templates, mirroring the FP load templates above */
#define GEN_STF(name, stop, opc, type)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUF(name, stop, opc, type)                                       \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUXF(name, stop, opc, type)                                      \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STXF(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STFS(name, stop, op, type)                                        \
GEN_STF(name, stop, op | 0x20, type);                                         \
GEN_STUF(name, stop, op | 0x21, type);                                        \
GEN_STUXF(name, stop, op | 0x01, type);                                       \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
3461 static inline void gen_qemu_st32fs(DisasContext
*ctx
, TCGv_i64 arg1
, TCGv arg2
)
3463 TCGv_i32 t0
= tcg_temp_new_i32();
3464 TCGv t1
= tcg_temp_new();
3465 gen_helper_float64_to_float32(t0
, cpu_env
, arg1
);
3466 tcg_gen_extu_i32_tl(t1
, t0
);
3467 tcg_temp_free_i32(t0
);
3468 gen_qemu_st32(ctx
, t1
, arg2
);
3472 /* stfd stfdu stfdux stfdx */
3473 GEN_STFS(stfd
, st64
, 0x16, PPC_FLOAT
);
3474 /* stfs stfsu stfsux stfsx */
3475 GEN_STFS(stfs
, st32fs
, 0x14, PPC_FLOAT
);