/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
30 #include "qemu-common.h"
31 #include "host-utils.h"
37 #define CPU_SINGLE_STEP 0x1
38 #define CPU_BRANCH_STEP 0x2
39 #define GDBSTUB_SINGLE_STEP 0x4
/* Include definitions for instruction classes and implementation flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers                                                  */

/* global register indexes */
static TCGv_ptr cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
#if !defined(TARGET_PPC64)
    + 10*4 + 22*5 /* SPE GPRh */
#endif
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
#if !defined(TARGET_PPC64)
static TCGv cpu_gprh[32];
#endif
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
static TCGv cpu_xer;
static TCGv cpu_reserve;
static TCGv_i32 cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "gen-icount.h"
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
#if !defined(TARGET_PPC64)
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
#endif

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
    }

    cpu_nip = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, nip), "nip");

    cpu_msr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUState, lr), "lr");

    cpu_xer = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, xer), "xer");

    cpu_reserve = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUState, access_type), "access_type");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
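/*
 * Illustration (a sketch, not part of the original file): once registered
 * above, a TCG global stands for its CPUState field inside generated code,
 * so a fragment such as
 *
 *     tcg_gen_addi_tl(cpu_gpr[3], cpu_gpr[3], 1);
 *
 * emits TCG ops that increment env->gpr[3] when the translated block runs.
 * No explicit load or store of the field is needed; TCG spills and refills
 * the global around helper calls by itself.
 */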
/* internal defines */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    int mem_idx;
    int access_type;
    /* Translation flags */
    int le_mode;
#if defined(TARGET_PPC64)
    int sf_mode;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int spe_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
} DisasContext;

struct opc_handler_t {
    /* invalid bits */
    uint32_t inval;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};
static inline void gen_reset_fpstatus(void)
{
#ifdef CONFIG_SOFTFLOAT
    gen_helper_reset_fpstatus();
#endif
}

static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    if (set_fprf != 0) {
        /* This case might be optimized later */
        tcg_gen_movi_i32(t0, 1);
        gen_helper_compute_fprf(t0, arg, t0);
        if (unlikely(set_rc)) {
            tcg_gen_mov_i32(cpu_crf[1], t0);
        }
        gen_helper_float_check_status();
    } else if (unlikely(set_rc)) {
        /* We always need to compute fpcc */
        tcg_gen_movi_i32(t0, 0);
        gen_helper_compute_fprf(t0, arg, t0);
        tcg_gen_mov_i32(cpu_crf[1], t0);
    }

    tcg_temp_free_i32(t0);
}
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->sf_mode)
        tcg_gen_movi_tl(cpu_nip, nip);
    else
#endif
        tcg_gen_movi_tl(cpu_nip, (uint32_t)nip);
}
static inline void gen_exception_err(DisasContext *ctx, uint32_t excp,
                                     uint32_t error)
{
    TCGv_i32 t0, t1;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (ctx->exception != POWERPC_EXCP_BRANCH)
        gen_update_nip(ctx, ctx->nip);
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
/*****************************************************************************/
/***                           Instruction decoding                        ***/
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}
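/*
 * Example for illustration: EXTRACT_HELPER(rA, 16, 5) expands to
 *
 *     static inline uint32_t rA(uint32_t opcode)
 *     {
 *         return (opcode >> 16) & ((1 << 5) - 1);
 *     }
 *
 * i.e. it returns the 5-bit rA field from bits 16..20 of the opcode.
 * EXTRACT_SHELPER differs only in casting through int16_t, so a 16-bit
 * field such as SIMM comes back sign-extended.
 */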
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/***                               Get CRn                                 ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
/***                                 Get constants                         ***/
EXTRACT_HELPER(IMM, 12, 8);
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Byte count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);
EXTRACT_HELPER(FM, 17, 8);
EXTRACT_HELPER(SR, 16, 4);
EXTRACT_HELPER(FPIMM, 12, 4);
/***                            Jump target decoding                       ***/
/* Displacement */
EXTRACT_SHELPER(d, 0, 16);
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);
/* Create a mask between <start> and <end> bits */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
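/*
 * Worked example (illustrative), using the PowerPC convention that bit 0
 * is the most significant bit: on a 32-bit target, MASK(0, 31) is
 * 0xFFFFFFFF, MASK(24, 31) is 0x000000FF and MASK(0, 7) is 0xFF000000.
 * When start > end the mask wraps around, e.g. MASK(30, 1) selects the
 * two lowest and the two highest bits (0xC0000003).
 */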
/*****************************************************************************/
/* PowerPC instructions table                                                */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1, .opc2 = op2, .opc3 = op3,                                    \
    .pad = { 0, },                                                            \
    .handler = {                                                              \
        .inval = invl,                                                        \
        .type = _typ, .type2 = _typ2,                                         \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1, .opc2 = op2, .opc3 = op3,                                    \
    .pad = { 0, },                                                            \
    .handler = {                                                              \
        .inval = invl,                                                        \
        .type = _typ, .type2 = _typ2,                                         \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1, .opc2 = op2, .opc3 = op3,                                    \
    .pad = { 0, },                                                            \
    .handler = {                                                              \
        .inval = invl,                                                        \
        .type = _typ, .type2 = _typ2,                                         \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1, .opc2 = op2, .opc3 = op3,                                    \
    .pad = { 0, },                                                            \
    .handler = {                                                              \
        .inval = invl,                                                        \
        .type = _typ, .type2 = _typ2,                                         \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif
/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
}
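/*
 * Usage sketch (illustrative): SPRs are not TCG globals, so reads and
 * writes go through the accessors above, e.g.
 *
 *     TCGv t0 = tcg_temp_new();
 *     gen_load_spr(t0, SPR_PPR);
 *     ... modify t0 ...
 *     gen_store_spr(SPR_PPR, t0);
 *     tcg_temp_free(t0);
 *
 * which is the pattern gen_or() uses below to update the program-priority
 * register.
 */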
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};
/***                           Integer comparison                          ***/

static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    int l1, l2, l3;

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_xer);
    tcg_gen_shri_i32(cpu_crf[crf], cpu_crf[crf], XER_SO);
    tcg_gen_andi_i32(cpu_crf[crf], cpu_crf[crf], 1);

    l1 = gen_new_label();
    l2 = gen_new_label();
    l3 = gen_new_label();
    if (s) {
        tcg_gen_brcond_tl(TCG_COND_LT, arg0, arg1, l1);
        tcg_gen_brcond_tl(TCG_COND_GT, arg0, arg1, l2);
    } else {
        tcg_gen_brcond_tl(TCG_COND_LTU, arg0, arg1, l1);
        tcg_gen_brcond_tl(TCG_COND_GTU, arg0, arg1, l2);
    }
    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_EQ);
    tcg_gen_br(l3);
    gen_set_label(l1);
    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_LT);
    tcg_gen_br(l3);
    gen_set_label(l2);
    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_GT);
    gen_set_label(l3);
}
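/*
 * Illustrative note: each cpu_crf[crf] holds one 4-bit CR field. Assuming
 * the CRF_* constants from cpu.h place LT at bit 3, GT at bit 2, EQ at
 * bit 1 and SO at bit 0, the sequence above first copies XER[SO] into
 * bit 0, then ORs in exactly one of 1 << CRF_LT, 1 << CRF_GT or
 * 1 << CRF_EQ depending on how arg0 compares to arg1.
 */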
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_local_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_local_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
#endif

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode))
        gen_op_cmpi32(reg, 0, 1, 0);
    else
#endif
        gen_op_cmpi(reg, 0, 1, 0);
}
/* cmp */
static void gen_cmp(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    else
#endif
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    else
#endif
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    else
#endif
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    else
#endif
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
}
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    int l1, l2;
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask;
    TCGv_i32 t0;

    l1 = gen_new_label();
    l2 = gen_new_label();

    mask = 1 << (3 - (bi & 0x03));
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
    if (rA(ctx->opcode) == 0)
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
    else
        tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    gen_set_label(l2);
    tcg_temp_free_i32(t0);
}
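/*
 * Semantics sketch (illustrative): isel rD,rA,rB,bi computes
 *
 *     rD = CR[bi] ? (rA == 0 ? 0 : GPR[rA]) : GPR[rB]
 *
 * e.g. "isel r3,r4,r5,0" copies r4 into r3 when CR0[LT] is set and r5
 * otherwise. The mask above picks the tested bit out of the 4-bit
 * cpu_crf[bi >> 2] field.
 */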
/***                           Integer arithmetic                          ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    int l1;
    TCGv t0;

    l1 = gen_new_label();
    /* Start with XER OV disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    t0 = tcg_temp_local_new();
    tcg_gen_xor_tl(t0, arg0, arg1);
#if defined(TARGET_PPC64)
    if (!ctx->sf_mode)
        tcg_gen_ext32s_tl(t0, t0);
#endif
    if (sub)
        tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
    else
        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
    tcg_gen_xor_tl(t0, arg1, arg2);
#if defined(TARGET_PPC64)
    if (!ctx->sf_mode)
        tcg_gen_ext32s_tl(t0, t0);
#endif
    if (sub)
        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
    else
        tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    gen_set_label(l1);
    tcg_temp_free(t0);
}

static inline void gen_op_arith_compute_ca(DisasContext *ctx, TCGv arg1,
                                           TCGv arg2, int sub)
{
    int l1 = gen_new_label();

#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode)) {
        TCGv t0, t1;
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();

        tcg_gen_ext32u_tl(t0, arg1);
        tcg_gen_ext32u_tl(t1, arg2);
        if (sub) {
            tcg_gen_brcond_tl(TCG_COND_GTU, t0, t1, l1);
        } else {
            tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
        }
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        gen_set_label(l1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    } else
#endif
    {
        if (sub) {
            tcg_gen_brcond_tl(TCG_COND_GTU, arg1, arg2, l1);
        } else {
            tcg_gen_brcond_tl(TCG_COND_GEU, arg1, arg2, l1);
        }
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        gen_set_label(l1);
    }
}
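/*
 * Worked example (illustrative, 8-bit analogy): for an addition
 * t0 = arg1 + arg2 the caller passes (t0, arg1, sub=0); a carry-out
 * happened exactly when the truncated sum is below either operand, e.g.
 * 0xFF + 0x02 = 0x01 < 0xFF, so the GEU branch above falls through and
 * XER[CA] is set. The sub=1 variant implements the inverted-borrow carry
 * that subfc-style instructions define.
 */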
/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, int add_ca, int compute_ca,
                                    int compute_ov)
{
    TCGv t0, t1;

    if ((!compute_ca && !compute_ov) ||
        (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) {
        t0 = ret;
    } else {
        t0 = tcg_temp_local_new();
    }

    if (add_ca) {
        t1 = tcg_temp_local_new();
        tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
        tcg_gen_shri_tl(t1, t1, XER_CA);
    } else {
        TCGV_UNUSED(t1);
    }

    if (compute_ca && compute_ov) {
        /* Start with XER CA and OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
    } else if (compute_ca) {
        /* Start with XER CA disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    } else if (compute_ov) {
        /* Start with XER OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }

    tcg_gen_add_tl(t0, arg1, arg2);

    if (compute_ca) {
        gen_op_arith_compute_ca(ctx, t0, arg1, 0);
    }
    if (add_ca) {
        tcg_gen_add_tl(t0, t0, t1);
        gen_op_arith_compute_ca(ctx, t0, t1, 0);
        tcg_temp_free(t1);
    }
    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, t0);

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov);                         \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_local_tl(const_val);                                  \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov);                         \
    tcg_temp_free(t0);                                                        \
}

/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme  addme.  addmeo  addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze  addze.  addzeo  addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
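/*
 * Example expansion (illustrative): GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
 * defines
 *
 *     static void gen_adde(DisasContext *ctx)
 *     {
 *         gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],
 *                          cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
 *                          1, 1, 0);
 *     }
 *
 * i.e. add-extended: consume XER[CA] (add_ca), recompute it (compute_ca)
 * and leave XER[OV] alone (no compute_ov). The opc3 argument only matters
 * to the opcode table entries declared elsewhere in this file.
 */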
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic  addic. */
static inline void gen_op_addic(DisasContext *ctx, TCGv ret, TCGv arg1,
                                int compute_Rc0)
{
    target_long simm = SIMM(ctx->opcode);

    /* Start with XER CA disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));

    if (likely(simm != 0)) {
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_addi_tl(t0, arg1, simm);
        gen_op_arith_compute_ca(ctx, t0, arg1, 0);
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_mov_tl(ret, arg1);
    }
    if (compute_Rc0) {
        gen_set_Rc0(ctx, ret);
    }
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
}
/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}

#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu  divwu.  divwuo  divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
#endif
/* mulhw  mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
#if defined(TARGET_PPC64)
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
#else
    tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
#endif
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mulhwu  mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
#if defined(TARGET_PPC64)
    tcg_gen_ext32u_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32u_i64(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
#else
    tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
#endif
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mullw  mullw. */
static void gen_mullw(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    int l1;
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    l1 = gen_new_label();
    /* Start with XER OV disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
#if defined(TARGET_PPC64)
    tcg_gen_ext32s_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_i64(t1, cpu_gpr[rB(ctx->opcode)]);
#else
    tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
#endif
    tcg_gen_mul_i64(t0, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_ext32s_i64(cpu_gpr[rD(ctx->opcode)], t0);
    tcg_gen_brcond_i64(TCG_COND_EQ, t0, cpu_gpr[rD(ctx->opcode)], l1);
#else
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
    tcg_gen_ext32s_i64(t1, t0);
    tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
#endif
    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    gen_set_label(l1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
#if defined(TARGET_PPC64)
#define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_helper_##name (cpu_gpr[rD(ctx->opcode)],                              \
                       cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);   \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
}
/* mulhdu  mulhdu. */
GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00);
/* mulhd  mulhd. */
GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02);
/* mulld  mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mulldo  mulldo. */
GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17);
#endif
1153 static inline void gen_op_arith_neg(DisasContext
*ctx
, TCGv ret
, TCGv arg1
,
1156 int l1
= gen_new_label();
1157 int l2
= gen_new_label();
1158 TCGv t0
= tcg_temp_local_new();
1159 #if defined(TARGET_PPC64)
1161 tcg_gen_mov_tl(t0
, arg1
);
1162 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, INT64_MIN
, l1
);
1166 tcg_gen_ext32s_tl(t0
, arg1
);
1167 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, INT32_MIN
, l1
);
1169 tcg_gen_neg_tl(ret
, arg1
);
1171 tcg_gen_andi_tl(cpu_xer
, cpu_xer
, ~(1 << XER_OV
));
1175 tcg_gen_mov_tl(ret
, t0
);
1177 tcg_gen_ori_tl(cpu_xer
, cpu_xer
, (1 << XER_OV
) | (1 << XER_SO
));
1181 if (unlikely(Rc(ctx
->opcode
) != 0))
1182 gen_set_Rc0(ctx
, ret
);
1185 static void gen_neg(DisasContext
*ctx
)
1187 gen_op_arith_neg(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)], 0);
1190 static void gen_nego(DisasContext
*ctx
)
1192 gen_op_arith_neg(ctx
, cpu_gpr
[rD(ctx
->opcode
)], cpu_gpr
[rA(ctx
->opcode
)], 1);
/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int add_ca, int compute_ca,
                                     int compute_ov)
{
    TCGv t0, t1;

    if ((!compute_ca && !compute_ov) ||
        (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) {
        t0 = ret;
    } else {
        t0 = tcg_temp_local_new();
    }

    if (add_ca) {
        t1 = tcg_temp_local_new();
        tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
        tcg_gen_shri_tl(t1, t1, XER_CA);
    } else {
        TCGV_UNUSED(t1);
    }

    if (compute_ca && compute_ov) {
        /* Start with XER CA and OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
    } else if (compute_ca) {
        /* Start with XER CA disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    } else if (compute_ov) {
        /* Start with XER OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }

    if (add_ca) {
        tcg_gen_not_tl(t0, arg1);
        tcg_gen_add_tl(t0, t0, arg2);
        gen_op_arith_compute_ca(ctx, t0, arg2, 0);
        tcg_gen_add_tl(t0, t0, t1);
        gen_op_arith_compute_ca(ctx, t0, t1, 0);
        tcg_temp_free(t1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
        if (compute_ca) {
            gen_op_arith_compute_ca(ctx, t0, arg2, 1);
        }
    }
    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, t0);

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov);                        \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_local_tl(const_val);                                  \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov);                        \
    tcg_temp_free(t0);                                                        \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    /* Start with XER CA disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    TCGv t0 = tcg_temp_local_new();
    TCGv t1 = tcg_const_local_tl(SIMM(ctx->opcode));
    tcg_gen_sub_tl(t0, t1, cpu_gpr[rA(ctx->opcode)]);
    gen_op_arith_compute_ca(ctx, t0, t1, 1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
/***                            Integer logical                            ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (ctx->mem_idx > 0) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (ctx->mem_idx > 0) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (ctx->mem_idx > 0) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->mem_idx > 1) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        /* XXX: should handle special NOPs for POWER series */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
/***                             Integer rotate                            ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    sh = SH(ctx->opcode);
    if (likely(sh == 0 && mb == 0 && me == 31)) {
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        target_ulong mask;
        TCGv t1;
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t2, t2, sh);
        tcg_gen_extu_i32_i64(t0, t2);
        tcg_temp_free_i32(t2);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        t1 = tcg_temp_new();
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    sh = SH(ctx->opcode);
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == (31 - sh))) {
        if (likely(sh == 0)) {
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
        } else {
            TCGv t0 = tcg_temp_new();
            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shli_tl(t0, t0, sh);
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
            tcg_temp_free(t0);
        }
    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_shri_tl(t0, t0, mb);
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free(t0);
    } else {
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t1, t1, sh);
        tcg_gen_extu_i32_i64(t0, t1);
        tcg_temp_free_i32(t1);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
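/*
 * Worked example (illustrative): the fast paths above recognise the two
 * common rlwinm idioms. "rlwinm rA,rS,8,0,23" has mb == 0 and
 * me == 31 - sh, so it is "slwi rA,rS,8"; "rlwinm rA,rS,24,8,31" has
 * me == 31 and sh == 32 - mb, so it is "srwi rA,rS,8". Everything else
 * falls back to rotate-then-mask.
 */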
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    uint32_t mb, me;
    TCGv t0;
#if defined(TARGET_PPC64)
    TCGv_i32 t1, t2;
#endif

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
    t1 = tcg_temp_new_i32();
    t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_trunc_i64_i32(t2, t0);
    tcg_gen_rotl_i32(t1, t1, t2);
    tcg_gen_extu_i32_i64(t0, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
#else
    tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
    if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
                              uint32_t sh)
{
    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        if (likely(mb == 0 && me == 63)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        } else {
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
{
    TCGv t0;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    if (unlikely(mb != 0 || me != 63)) {
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb, me;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    me = 63 - sh;
    if (unlikely(sh == 0 && mb == 0)) {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        TCGv t0, t1;
        target_ulong mask;

        t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        t1 = tcg_temp_new();
        mask = MASK(mb, me);
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
/***                             Integer shift                             ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
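/*
 * Note on the mask trick above (illustrative): slw takes a 6-bit shift
 * count, and counts of 0x20..0x3f must produce zero. The first shift left
 * moves bit 5 of the count into the sign bit of the target-sized value,
 * so the arithmetic right shift that follows yields all-ones exactly when
 * the count is >= 0x20; andc then clears the source in that case, and the
 * real shift only uses the low five bits of rB.
 */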
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1843 static void gen_srawi(DisasContext
*ctx
)
1845 int sh
= SH(ctx
->opcode
);
1849 l1
= gen_new_label();
1850 l2
= gen_new_label();
1851 t0
= tcg_temp_local_new();
1852 tcg_gen_ext32s_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)]);
1853 tcg_gen_brcondi_tl(TCG_COND_GE
, t0
, 0, l1
);
1854 tcg_gen_andi_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)], (1ULL << sh
) - 1);
1855 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, l1
);
1856 tcg_gen_ori_tl(cpu_xer
, cpu_xer
, 1 << XER_CA
);
1859 tcg_gen_andi_tl(cpu_xer
, cpu_xer
, ~(1 << XER_CA
));
1861 tcg_gen_ext32s_tl(t0
, cpu_gpr
[rS(ctx
->opcode
)]);
1862 tcg_gen_sari_tl(cpu_gpr
[rA(ctx
->opcode
)], t0
, sh
);
1865 tcg_gen_mov_tl(cpu_gpr
[rA(ctx
->opcode
)], cpu_gpr
[rS(ctx
->opcode
)]);
1866 tcg_gen_andi_tl(cpu_xer
, cpu_xer
, ~(1 << XER_CA
));
1868 if (unlikely(Rc(ctx
->opcode
) != 0))
1869 gen_set_Rc0(ctx
, cpu_gpr
[rA(ctx
->opcode
)]);
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);

    if (sh != 0) {
        int l1, l2;
        TCGv t0;
        l1 = gen_new_label();
        l2 = gen_new_label();
        t0 = tcg_temp_local_new();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        tcg_gen_br(l2);
        gen_set_label(l1);
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
        gen_set_label(l2);
        tcg_temp_free(t0);
        tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}

/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
/***                       Floating-Point arithmetic                       ***/
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],      \
                     cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);     \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);  \
    }                                                                         \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf,                      \
                     Rc(ctx->opcode) != 0);                                   \
}

#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                              \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);                     \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);

#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],      \
                     cpu_fpr[rB(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);  \
    }                                                                         \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],      \
                     cpu_fpr[rC(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);  \
    }                                                                         \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);   \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}

#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);   \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
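/*
 * Example expansion (illustrative): GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1,
 * PPC_FLOAT) emits two handlers: gen_fadd, which calls gen_helper_fadd on
 * fprD/fprA/fprB, and gen_fadds, which additionally rounds the result to
 * single precision through gen_helper_frsp (isfloat == 1). Both then set
 * FPRF and, for the Rc=1 forms, CR1 via gen_compute_fprf().
 */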
/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}

/* fsel */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* fsqrt */
static void gen_fsqrt(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}

/* fsqrts */
static void gen_fsqrts(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
/***                     Floating-Point multiply-and-add                   ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/***                     Floating-Point round & convert                    ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
#if defined(TARGET_PPC64)
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
#endif

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
/***                         Floating-Point compare                        ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpo(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status();
}

/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpu(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status();
}
/*** Floating-point move ***/
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
GEN_FLOAT_B(abs, 0x08, 0x08, 0, PPC_FLOAT);
/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
}
/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
GEN_FLOAT_B(nabs, 0x08, 0x04, 0, PPC_FLOAT);
/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
GEN_FLOAT_B(neg, 0x08, 0x01, 0, PPC_FLOAT);
/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    int bfa;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = 4 * (7 - crfS(ctx->opcode));
    tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
    tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
}
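/* Worked example for gen_mcrfs above (illustrative): with crfS = 0,
 * bfa = 4 * (7 - 0) = 28, so the most significant nibble of the FPSCR is
 * shifted down into the destination CR field, masked to 4 bits, and the
 * same nibble is then cleared in the FPSCR via ~(0xF << 28).
 */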
/* mffs */
static void gen_mffs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_reset_fpstatus();
    tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
}
/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
    }
}
/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from a helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status();
}
/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    int L = ctx->opcode & 0x02000000;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    if (L)
        t0 = tcg_const_i32(0xff);
    else
        t0 = tcg_const_i32(FM(ctx->opcode));
    gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
    tcg_temp_free_i32(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status();
}
/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bf = crbD(ctx->opcode) >> 2;
    sh = 7 - bf;
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
    t1 = tcg_const_i32(1 << sh);
    gen_helper_store_fpscr(t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status();
}
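/* Worked example for gen_mtfsfi above (illustrative): for FPSCR field
 * bf = 0, sh = 7 - 0 = 7, so the 4-bit immediate is placed at bits 28..31
 * (FPIMM << 28) and the field mask handed to the helper is 1 << 7 = 0x80,
 * selecting the highest FPSCR field.
 */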
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
#if defined(TARGET_PPC64)
        if (!ctx->sf_mode) {
            tcg_gen_movi_tl(EA, (uint32_t)simm);
        } else
#endif
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
#if defined(TARGET_PPC64)
        if (!ctx->sf_mode) {
            tcg_gen_ext32u_tl(EA, EA);
        }
#endif
    } else {
#if defined(TARGET_PPC64)
        if (!ctx->sf_mode) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else
#endif
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}
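/* Illustrative example: for a D-form access with RA = 0, e.g.
 * "lwz r3, 8(0)", the first branch above yields EA = 8, because RA = 0
 * denotes a literal zero rather than the contents of GPR0; with RA = 1 the
 * likely(simm != 0) path emits EA = r1 + 8.  In 32-bit mode on a 64-bit
 * target (!sf_mode) the result is additionally truncated to 32 bits.
 */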
static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
#if defined(TARGET_PPC64)
        if (!ctx->sf_mode) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        } else
#endif
        tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
    } else {
        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
#if defined(TARGET_PPC64)
        if (!ctx->sf_mode) {
            tcg_gen_ext32u_tl(EA, EA);
        }
#endif
    }
}
static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        tcg_gen_movi_tl(EA, 0);
    } else {
#if defined(TARGET_PPC64)
        if (!ctx->sf_mode) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else
#endif
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
#if defined(TARGET_PPC64)
    if (!ctx->sf_mode) {
        tcg_gen_ext32u_tl(ret, ret);
    }
#endif
}
static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
{
    int l1 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1, t2;
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    tcg_gen_andi_tl(t0, EA, mask);
    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
    t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
    t2 = tcg_const_i32(0);
    gen_helper_raise_exception_err(t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    gen_set_label(l1);
    tcg_temp_free(t0);
}
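/* Illustrative use of gen_check_align: callers pass the low-bit mask that
 * must be clear for the access to be aligned, e.g. 0x03 for the 4-byte
 * lwarx/stwcx. pair and 0x07 for the 8-byte ldarx/stdcx. pair below; a
 * non-zero (EA & mask) raises POWERPC_EXCP_ALIGN.
 */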
/*** Integer load ***/
static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
    if (unlikely(ctx->le_mode)) {
        tcg_gen_bswap16_tl(arg1, arg1);
    }
}

static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
        tcg_gen_bswap16_tl(arg1, arg1);
        tcg_gen_ext16s_tl(arg1, arg1);
    } else {
        tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
    }
}

static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
    if (unlikely(ctx->le_mode)) {
        tcg_gen_bswap32_tl(arg1, arg1);
    }
}

#if defined(TARGET_PPC64)
static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
        tcg_gen_bswap32_tl(arg1, arg1);
        tcg_gen_ext32s_tl(arg1, arg1);
    } else
        tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
}
#endif

static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
    if (unlikely(ctx->le_mode)) {
        tcg_gen_bswap64_i64(arg1, arg1);
    }
}
static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext16u_tl(t0, arg1);
        tcg_gen_bswap16_tl(t0, t0);
        tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
    }
}

static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, arg1);
        tcg_gen_bswap32_tl(t0, t0);
        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
    }
}

static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_bswap64_i64(t0, arg1);
        tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
        tcg_temp_free_i64(t0);
    } else
        tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
}
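/* Note on le_mode in the helpers above: the tcg_gen_qemu_ld/st primitives
 * access guest memory in the target's native big-endian order, so when the
 * CPU runs little-endian the value is byte-swapped after a load and before
 * a store; the store helpers swap in a temporary so the source register is
 * left unmodified.
 */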
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
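/* Illustrative expansion: GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) below
 * instantiates the four handlers
 *     static void gen_lbz(DisasContext *ctx);
 *     static void gen_lbzu(DisasContext *ctx);
 *     static void gen_lbzux(DisasContext *ctx);
 *     static void gen_lbzx(DisasContext *ctx);
 * all loading through gen_qemu_ld8u, with the u/ux update forms writing the
 * effective address back to rA.
 */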
/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
static void gen_ld(DisasContext *ctx)
{
    TCGv EA;
    if (Rc(ctx->opcode)) {
        if (unlikely(rA(ctx->opcode) == 0 ||
                     rA(ctx->opcode) == rD(ctx->opcode))) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x03);
    if (ctx->opcode & 0x02) {
        /* lwa (lwau is undefined) */
        gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    } else {
        gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
    }
    if (Rc(ctx->opcode))
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
    tcg_temp_free(EA);
}
/* lq */
static void gen_lq(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
    int ra, rd;
    TCGv EA;

    /* Restore CPU state */
    if (unlikely(ctx->mem_idx == 0)) {
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }
    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (unlikely(ctx->le_mode)) {
        /* Little-endian mode is not handled */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);
    gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
    gen_addr_add(ctx, EA, EA, 8);
    gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
    tcg_temp_free(EA);
#endif
}
#endif
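/* Note: lq above loads an even/odd register pair: it rejects an odd rd and
 * rd == ra, masks the immediate with 0x0F for quadword alignment, and emits
 * two 8-byte loads at EA and EA + 8 into cpu_gpr[rd] and cpu_gpr[rd + 1].
 */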
/*** Integer store ***/
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STX(name, stop, opc2, opc3, type)                                 \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
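/* Illustrative expansion: GEN_STS(stw, st32, 0x04, PPC_INTEGER) below
 * produces gen_stw, gen_st32u (note that GEN_STU names the update form
 * after its stop argument, not after name), gen_stwux and gen_stwx, all
 * storing through gen_qemu_st32.
 */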
/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
static void gen_std(DisasContext *ctx)
{
    int rs;
    TCGv EA;

    rs = rS(ctx->opcode);
    if ((ctx->opcode & 0x3) == 0x2) {
#if defined(CONFIG_USER_ONLY)
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
        /* stq */
        if (unlikely(ctx->mem_idx == 0)) {
            gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return;
        }
        if (unlikely(rs & 1)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        if (unlikely(ctx->le_mode)) {
            /* Little-endian mode is not handled */
            gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
            return;
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
        tcg_temp_free(EA);
#endif
    } else {
        /* std / stdu */
        if (Rc(ctx->opcode)) {
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        if (Rc(ctx->opcode))
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
        tcg_temp_free(EA);
    }
}
#endif
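/* Note: gen_std above dispatches on the DS-form XO bits: a low opcode value
 * of 0x2 selects stq (treated as privileged here and using a register pair
 * like lq), while Rc(ctx->opcode), i.e. the lowest opcode bit, distinguishes
 * stdu (with rA update) from plain std.
 */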
/*** Integer load and store with byte reverse ***/

/* lhbrx */
static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
    if (likely(!ctx->le_mode)) {
        tcg_gen_bswap16_tl(arg1, arg1);
    }
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
    if (likely(!ctx->le_mode)) {
        tcg_gen_bswap32_tl(arg1, arg1);
    }
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

/* sthbrx */
static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (likely(!ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext16u_tl(t0, arg1);
        tcg_gen_bswap16_tl(t0, t0);
        tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
    }
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);

/* stwbrx */
static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (likely(!ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, arg1);
        tcg_gen_bswap32_tl(t0, t0);
        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
    }
}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
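/* Note: the byte-reversed helpers above mirror the plain ones: they swap
 * when the CPU is big-endian (!le_mode) and skip the swap in little-endian
 * mode, where the regular load/store path already performs it, so
 * lhbrx/lwbrx always yield the byte-reversed value of a normal load.
 */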
/*** Integer load and store multiple ***/

/* lmw */
static void gen_lmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rD(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_lmw(t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}

/* stmw */
static void gen_stmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rS(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_stmw(t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
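/* Illustrative example: "lmw r29, 0(r1)" loads r29, r30 and r31 from three
 * consecutive words at r1; the starting register is passed to gen_helper_lmw
 * as a constant and the helper walks up to r31.  gen_stmw mirrors this for
 * stores.
 */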
/*** Integer load and store strings ***/

/* lswi */
/* PowerPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    if (nb == 0)
        nb = 32;
    nr = nb / 4;
    if (unlikely(((start + nr) > 32 &&
                  start <= ra && (start + nr - 32) > ra) ||
                 ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(start);
    gen_helper_lsw(t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
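/* Worked example of the range check above (illustrative): for
 * "lswi r4, r10, 12", nb = 12 gives nr = 3, so r4..r6 are loaded; had ra
 * been 5, the second clause (start + nr <= 32, start <= ra, start + nr > ra)
 * would fire and POWERPC_EXCP_INVAL_LSWX would be raised, following the
 * specification rather than the more permissive IBM behaviour.
 */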
/* lswx */
static void gen_lswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2, t3;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_const_i32(rD(ctx->opcode));
    t2 = tcg_const_i32(rA(ctx->opcode));
    t3 = tcg_const_i32(rB(ctx->opcode));
    gen_helper_lswx(t0, t1, t2, t3);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
}
/* stswi */
static void gen_stswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    if (nb == 0)
        nb = 32;
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
/* stswx */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
/*** Memory synchronisation ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
}

/* isync */
static void gen_isync(DisasContext *ctx)
{
    gen_stop_exception(ctx);
}

/* lwarx */
static void gen_lwarx(DisasContext *ctx)
{
    TCGv t0;
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    gen_set_access_type(ctx, ACCESS_RES);
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x03);
    gen_qemu_ld32u(ctx, gpr, t0);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
    tcg_temp_free(t0);
}
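/* Note: gen_lwarx above records the reservation by copying the 4-byte
 * aligned effective address into cpu_reserve and stashing the loaded value
 * in CPUState.reserve_val; stwcx. later compares its own EA against
 * cpu_reserve to decide whether the conditional store succeeds.
 */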
#if defined(CONFIG_USER_ONLY)
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
                                  int reg, int size)
{
    TCGv t0 = tcg_temp_new();
    uint32_t save_exception = ctx->exception;

    tcg_gen_st_tl(EA, cpu_env, offsetof(CPUState, reserve_ea));
    tcg_gen_movi_tl(t0, (size << 5) | reg);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, reserve_info));
    tcg_temp_free(t0);
    gen_update_nip(ctx, ctx->nip - 4);
    ctx->exception = POWERPC_EXCP_BRANCH;
    gen_exception(ctx, POWERPC_EXCP_STCX);
    ctx->exception = save_exception;
}
#endif
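/* Note: in user-only mode no other CPU can snoop the reservation, so
 * gen_conditional_store defers the whole stwcx./stdcx. operation to the
 * POWERPC_EXCP_STCX exception path, packing the access size and source
 * register into CPUState.reserve_info as (size << 5) | reg.
 */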
/* stwcx. */
static void gen_stwcx_(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_RES);
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x03);
#if defined(CONFIG_USER_ONLY)
    gen_conditional_store(ctx, t0, rS(ctx->opcode), 4);
#else
    {
        int l1;

        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
        tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
        tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
        l1 = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
        gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0);
        gen_set_label(l1);
        tcg_gen_movi_tl(cpu_reserve, -1);
    }
#endif
    tcg_temp_free(t0);
}
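/* Note: the system-mode path above computes CR0 as SO (copied from XER)
 * plus EQ when the reservation address matches: on a match the store is
 * performed and CR0[EQ] is set, otherwise both are skipped; either way the
 * reservation is cleared by setting cpu_reserve to -1.
 */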
#if defined(TARGET_PPC64)
/* ldarx */
static void gen_ldarx(DisasContext *ctx)
{
    TCGv t0;
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    gen_set_access_type(ctx, ACCESS_RES);
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x07);
    gen_qemu_ld64(ctx, gpr, t0);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
    tcg_temp_free(t0);
}
/* stdcx. */
static void gen_stdcx_(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_RES);
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x07);
#if defined(CONFIG_USER_ONLY)
    gen_conditional_store(ctx, t0, rS(ctx->opcode), 8);