ppc: compute mask from BI using right shift
target-ppc/translate.c
/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instructions classes and implementations flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers */

/* global register indexes */
static TCGv_ptr cpu_env;
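/* Buffer sized to hold every register name created below: "r0".."r9" take
   3 bytes each (including the NUL), "r10".."r31" take 4, and likewise for
   the longer SPE, FPR, AVR, VSR and CRF name forms. */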
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 10*5 + 22*6 /* VSR */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_reserve;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "exec/gen-icount.h"

void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");

    done_init = 1;
}

/* internal defines */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    int mem_idx;
    int access_type;
    /* Translation flags */
    int le_mode;
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    int sf_mode;
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int vsx_enabled;
    int spe_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;
    uint64_t insns_flags2;
} DisasContext;

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C) (!(C)->sf_mode)
#else
# define NARROW_MODE(C) 0
#endif

struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};

static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    if (set_fprf != 0) {
        /* This case might be optimized later */
        tcg_gen_movi_i32(t0, 1);
        gen_helper_compute_fprf(t0, cpu_env, arg, t0);
        if (unlikely(set_rc)) {
            tcg_gen_mov_i32(cpu_crf[1], t0);
        }
        gen_helper_float_check_status(cpu_env);
    } else if (unlikely(set_rc)) {
        /* We always need to compute fpcc */
        tcg_gen_movi_i32(t0, 0);
        gen_helper_compute_fprf(t0, cpu_env, arg, t0);
        tcg_gen_mov_i32(cpu_crf[1], t0);
    }

    tcg_temp_free_i32(t0);
}

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}

static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}

#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;

/*****************************************************************************/
/*** Instruction decoding ***/
#define EXTRACT_HELPER(name, shift, nb) \
static inline uint32_t name(uint32_t opcode) \
{ \
    return (opcode >> (shift)) & ((1 << (nb)) - 1); \
}

#define EXTRACT_SHELPER(name, shift, nb) \
static inline int32_t name(uint32_t opcode) \
{ \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
}

#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
static inline uint32_t name(uint32_t opcode) \
{ \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
           ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
}
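/* For example, EXTRACT_HELPER(opc1, 26, 6) defines
   opc1(opcode) = (opcode >> 26) & 0x3F, the major opcode held in the
   top six bits of the instruction word. */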
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
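/* The SPR number is encoded with its two 5-bit halves swapped in the
   instruction image, so decoding swaps them back. */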
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
/*** Get constants ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);
EXTRACT_HELPER(SR, 16, 4);

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);

/*** Jump target decoding ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)

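/* Note: MASK() numbers bits IBM-style, bit 0 being the MSB; on a 64-bit
   target MASK(0, 7) is 0xFF00000000000000ULL and MASK(56, 63) is 0xFF,
   while a wrapped mask with start > end is the complement of
   MASK(end + 1, start - 1), as the rotate-and-mask insns expect. */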
/* Create a mask between <start> and <end> bits */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}

EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
/*****************************************************************************/
/* PowerPC instructions table */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl1, \
        .inval2 = invl2, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = onam, \
    }, \
    .oname = onam, \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl1, \
        .inval2 = invl2, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = onam, \
}
#endif

/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1 = 0xFFFFFFFF,
    .inval2 = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};

#if defined(TARGET_PPC64)
/* NOTE: at this time, the only use of is_user_mode() is in 64-bit code, so
   the function is wrapped in the standard 64-bit ifdef in order to avoid
   compiler warnings in 32-bit implementations. */
static bool is_user_mode(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    return true;
#else
    return ctx->mem_idx == 0;
#endif
}
#endif

/*** Integer comparison ***/

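/* Each CR field lives in cpu_crf[] as a 4-bit value; assuming the usual
   QEMU bit numbering (CRF_LT = 3, CRF_GT = 2, CRF_EQ = 1, CRF_SO = 0),
   the comparison below copies SO into the field and then ORs in the
   LT/GT/EQ bit selected by each setcond. */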
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}

static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}

/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}

/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    int l1, l2;
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask;
    TCGv_i32 t0;

    l1 = gen_new_label();
    l2 = gen_new_label();

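    /* BI selects CR field bi >> 2 and bit bi & 3 within it; the CR bits
       are kept MSB-first (LT in bit 3), so the mask for the tested bit
       is 0x08 shifted right by the bit index. */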
    mask = 0x08 >> (bi & 0x03);
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
    if (rA(ctx->opcode) == 0)
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
    else
        tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    gen_set_label(l2);
    tcg_temp_free_i32(t0);
}

/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}

/*** Integer arithmetic ***/

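/* OV for addition is the sign bit of (result ^ arg2) & ~(arg1 ^ arg2):
   both operands had the same sign and the result's sign differs.  For
   subtraction the AND is not complemented, since overflow then requires
   the operands to differ in sign. */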
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
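            /* Let tcg_gen_add2 compute the carry-out directly; with
               add_ca, CA is folded in first and the carry accumulates
               across both double-word additions. */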
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
                                add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_tl(const_val); \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], t0, \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
    tcg_temp_free(t0); \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}

static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
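    /* Division by zero, and INT32_MIN / -1 for the signed case, branch
       to l1: the ISA leaves the quotient undefined, and it is modelled
       here as 0, or as the sign extension of the dividend when signed. */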
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov) \
static void gen_##name(DisasContext *ctx) \
{ \
    TCGv_i32 t0 = tcg_const_i32(compute_ov); \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
    } \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);

#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif

/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulhwu mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullw mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullwo mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

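    /* Overflow iff the high half of the 64-bit product is not the sign
       extension of the low half. */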
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}

#if defined(TARGET_PPC64)
/* mulhd mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulldo mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

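    /* As in gen_mullwo: overflow iff the high 64 bits of the product
       are not the sign extension of the low 64 bits. */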
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif

/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);        /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
                                 add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_tl(const_val); \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], t0, \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
    tcg_temp_free(t0); \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}

/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}

/*** Integer logical ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
           cpu_gpr[rB(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
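        /* or rX,rX,rX with Rc=0 is a no-op that 64-bit implementations
           also interpret as a thread priority hint; map the encodings
           below onto the PPR priority field. */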
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (ctx->mem_idx > 0) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (ctx->mem_idx > 0) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (ctx->mem_idx > 0) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->mem_idx > 1) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        /* XXX: should handle special NOPs for POWER series */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}

/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif

/*** Integer rotate ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    sh = SH(ctx->opcode);
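    /* When the rotation left-aligns the source with the mask
       (sh == 31 - me) and the mask is contiguous, rlwimi is exactly a
       deposit of me - mb + 1 bits at offset sh. */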
    if (likely(sh == (31-me) && mb <= me)) {
        tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                           cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv t1;
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
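        /* Replicate the low word into the high half so that a 64-bit
           rotation yields the 32-bit rotation result in both halves. */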
        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotli_i64(t0, t0, sh);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        t1 = tcg_temp_new();
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    sh = SH(ctx->opcode);
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == (31 - sh))) {
        if (likely(sh == 0)) {
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)],
                              cpu_gpr[rS(ctx->opcode)]);
        } else {
            TCGv t0 = tcg_temp_new();
            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shli_tl(t0, t0, sh);
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
            tcg_temp_free(t0);
        }
    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_shri_tl(t0, t0, mb);
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free(t0);
    } else if (likely(mb == 0 && me == 31)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t0, t0, sh);
        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotli_i64(t0, t0, sh);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    uint32_t mb, me;
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == 31)) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_trunc_tl_i32(t1, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
        TCGv t0;
#if defined(TARGET_PPC64)
        TCGv t1;
#endif

        t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
        t1 = tcg_temp_new_i64();
        tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotl_i64(t0, t1, t0);
        tcg_temp_free_i64(t1);
#else
        tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
        if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
            mb += 32;
            me += 32;
#endif
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        } else {
            tcg_gen_andi_tl(t0, t0, MASK(32, 63));
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2) \
static void glue(gen_, name##0)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0); \
} \
 \
static void glue(gen_, name##1)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1); \
}
#define GEN_PPC64_R4(name, opc1, opc2) \
static void glue(gen_, name##0)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0, 0); \
} \
 \
static void glue(gen_, name##1)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0, 1); \
} \
 \
static void glue(gen_, name##2)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1, 0); \
} \
 \
static void glue(gen_, name##3)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1, 1); \
}

static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
                              uint32_t sh)
{
    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        if (likely(mb == 0 && me == 63)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        } else {
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);

static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
{
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    if (unlikely(mb != 0 || me != 63)) {
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb, me;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    me = 63 - sh;
    if (unlikely(sh == 0 && mb == 0)) {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        TCGv t0, t1;
        target_ulong mask;

        t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        t1 = tcg_temp_new();
        mask = MASK(mb, me);
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif

/*** Integer shift ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
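    /* Shifting rB left so that its bit 5 (value 0x20) lands in the sign
       bit, then shifting right arithmetically, gives all-ones whenever
       the shift amount is >= 32; andc then clears rS entirely. */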
1934 #if defined(TARGET_PPC64)
1935 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
1936 tcg_gen_sari_tl(t0, t0, 0x3f);
1937 #else
1938 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
1939 tcg_gen_sari_tl(t0, t0, 0x1f);
1940 #endif
1941 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1942 t1 = tcg_temp_new();
1943 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
1944 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1945 tcg_temp_free(t1);
1946 tcg_temp_free(t0);
1947 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
1948 if (unlikely(Rc(ctx->opcode) != 0))
1949 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1950 }
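
/* The shift-amount test in gen_slw above is branchless: shifting rB left by
 * 58 (26 on 32-bit targets) moves rB's bit of value 0x20 into the sign bit,
 * and the arithmetic right shift then yields all-ones when rB >= 0x20 and
 * zero otherwise.  The andc thus clears rS exactly when the architected slw
 * result is 0 (shift amounts 32..63); the TCG shift itself only ever sees
 * rB & 0x1f.
 */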
1951
1952 /* sraw & sraw. */
1953 static void gen_sraw(DisasContext *ctx)
1954 {
1955 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
1956 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1957 if (unlikely(Rc(ctx->opcode) != 0))
1958 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1959 }
1960
1961 /* srawi & srawi. */
1962 static void gen_srawi(DisasContext *ctx)
1963 {
1964 int sh = SH(ctx->opcode);
1965 TCGv dst = cpu_gpr[rA(ctx->opcode)];
1966 TCGv src = cpu_gpr[rS(ctx->opcode)];
1967 if (sh == 0) {
1968 tcg_gen_ext32s_tl(dst, src);
1969 tcg_gen_movi_tl(cpu_ca, 0);
1970 } else {
1971 TCGv t0;
1972 tcg_gen_ext32s_tl(dst, src);
1973 tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
1974 t0 = tcg_temp_new();
1975 tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
1976 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
1977 tcg_temp_free(t0);
1978 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
1979 tcg_gen_sari_tl(dst, dst, sh);
1980 }
1981 if (unlikely(Rc(ctx->opcode) != 0)) {
1982 gen_set_Rc0(ctx, dst);
1983 }
1984 }
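
/* CA computation in gen_srawi: the bits below the shift point, taken from
 * the sign-extended source, are non-zero exactly when 1-bits are shifted
 * out; ANDing with the sign mask restricts that to negative operands, as
 * the architecture sets CA only for a negative source losing 1-bits.
 * Example: srawi rA,rS,1 with rS = 0xFFFFFFFF gives dst = -1 and CA = 1,
 * whereas rS = 0x00000003 gives dst = 1 and CA = 0 even though a 1-bit is
 * discarded, because the sign mask is zero.
 */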
1985
1986 /* srw & srw. */
1987 static void gen_srw(DisasContext *ctx)
1988 {
1989 TCGv t0, t1;
1990
1991 t0 = tcg_temp_new();
1992 /* AND rS with a mask that is 0 when rB >= 0x20 */
1993 #if defined(TARGET_PPC64)
1994 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
1995 tcg_gen_sari_tl(t0, t0, 0x3f);
1996 #else
1997 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
1998 tcg_gen_sari_tl(t0, t0, 0x1f);
1999 #endif
2000 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2001 tcg_gen_ext32u_tl(t0, t0);
2002 t1 = tcg_temp_new();
2003 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2004 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2005 tcg_temp_free(t1);
2006 tcg_temp_free(t0);
2007 if (unlikely(Rc(ctx->opcode) != 0))
2008 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2009 }
2010
2011 #if defined(TARGET_PPC64)
2012 /* sld & sld. */
2013 static void gen_sld(DisasContext *ctx)
2014 {
2015 TCGv t0, t1;
2016
2017 t0 = tcg_temp_new();
2018 /* AND rS with a mask that is 0 when rB >= 0x40 */
2019 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2020 tcg_gen_sari_tl(t0, t0, 0x3f);
2021 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2022 t1 = tcg_temp_new();
2023 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2024 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2025 tcg_temp_free(t1);
2026 tcg_temp_free(t0);
2027 if (unlikely(Rc(ctx->opcode) != 0))
2028 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2029 }
2030
2031 /* srad & srad. */
2032 static void gen_srad(DisasContext *ctx)
2033 {
2034 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2035 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2036 if (unlikely(Rc(ctx->opcode) != 0))
2037 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2038 }
2039 /* sradi & sradi. */
2040 static inline void gen_sradi(DisasContext *ctx, int n)
2041 {
2042 int sh = SH(ctx->opcode) + (n << 5);
2043 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2044 TCGv src = cpu_gpr[rS(ctx->opcode)];
2045 if (sh == 0) {
2046 tcg_gen_mov_tl(dst, src);
2047 tcg_gen_movi_tl(cpu_ca, 0);
2048 } else {
2049 TCGv t0;
2050 tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
2051 t0 = tcg_temp_new();
2052 tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
2053 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2054 tcg_temp_free(t0);
2055 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2056 tcg_gen_sari_tl(dst, src, sh);
2057 }
2058 if (unlikely(Rc(ctx->opcode) != 0)) {
2059 gen_set_Rc0(ctx, dst);
2060 }
2061 }
2062
2063 static void gen_sradi0(DisasContext *ctx)
2064 {
2065 gen_sradi(ctx, 0);
2066 }
2067
2068 static void gen_sradi1(DisasContext *ctx)
2069 {
2070 gen_sradi(ctx, 1);
2071 }
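
/* As with the rld* insns above, sradi's 6-bit shift count straddles the
 * opcode, sh = SH(opcode) + (n << 5), so the decoder needs two entry
 * points: gen_sradi0 covers shifts 0..31 and gen_sradi1 shifts 32..63.
 */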
2072
2073 /* srd & srd. */
2074 static void gen_srd(DisasContext *ctx)
2075 {
2076 TCGv t0, t1;
2077
2078 t0 = tcg_temp_new();
2079 /* AND rS with a mask that is 0 when rB >= 0x40 */
2080 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2081 tcg_gen_sari_tl(t0, t0, 0x3f);
2082 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2083 t1 = tcg_temp_new();
2084 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2085 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2086 tcg_temp_free(t1);
2087 tcg_temp_free(t0);
2088 if (unlikely(Rc(ctx->opcode) != 0))
2089 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2090 }
2091 #endif
2092
2093 /*** Floating-Point arithmetic ***/
2094 #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
2095 static void gen_f##name(DisasContext *ctx) \
2096 { \
2097 if (unlikely(!ctx->fpu_enabled)) { \
2098 gen_exception(ctx, POWERPC_EXCP_FPU); \
2099 return; \
2100 } \
2101 /* NIP cannot be restored if the memory exception comes from a helper */ \
2102 gen_update_nip(ctx, ctx->nip - 4); \
2103 gen_reset_fpstatus(); \
2104 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2105 cpu_fpr[rA(ctx->opcode)], \
2106 cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2107 if (isfloat) { \
2108 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2109 cpu_fpr[rD(ctx->opcode)]); \
2110 } \
2111 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
2112 Rc(ctx->opcode) != 0); \
2113 }
2114
2115 #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
2116 _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
2117 _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
2118
2119 #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2120 static void gen_f##name(DisasContext *ctx) \
2121 { \
2122 if (unlikely(!ctx->fpu_enabled)) { \
2123 gen_exception(ctx, POWERPC_EXCP_FPU); \
2124 return; \
2125 } \
2126 /* NIP cannot be restored if the memory exception comes from a helper */ \
2127 gen_update_nip(ctx, ctx->nip - 4); \
2128 gen_reset_fpstatus(); \
2129 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2130 cpu_fpr[rA(ctx->opcode)], \
2131 cpu_fpr[rB(ctx->opcode)]); \
2132 if (isfloat) { \
2133 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2134 cpu_fpr[rD(ctx->opcode)]); \
2135 } \
2136 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2137 set_fprf, Rc(ctx->opcode) != 0); \
2138 }
2139 #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2140 _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2141 _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2142
2143 #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2144 static void gen_f##name(DisasContext *ctx) \
2145 { \
2146 if (unlikely(!ctx->fpu_enabled)) { \
2147 gen_exception(ctx, POWERPC_EXCP_FPU); \
2148 return; \
2149 } \
2150 /* NIP cannot be restored if the memory exception comes from a helper */ \
2151 gen_update_nip(ctx, ctx->nip - 4); \
2152 gen_reset_fpstatus(); \
2153 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2154 cpu_fpr[rA(ctx->opcode)], \
2155 cpu_fpr[rC(ctx->opcode)]); \
2156 if (isfloat) { \
2157 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2158 cpu_fpr[rD(ctx->opcode)]); \
2159 } \
2160 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2161 set_fprf, Rc(ctx->opcode) != 0); \
2162 }
2163 #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2164 _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2165 _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2166
2167 #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2168 static void gen_f##name(DisasContext *ctx) \
2169 { \
2170 if (unlikely(!ctx->fpu_enabled)) { \
2171 gen_exception(ctx, POWERPC_EXCP_FPU); \
2172 return; \
2173 } \
2174 /* NIP cannot be restored if the memory exception comes from a helper */ \
2175 gen_update_nip(ctx, ctx->nip - 4); \
2176 gen_reset_fpstatus(); \
2177 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2178 cpu_fpr[rB(ctx->opcode)]); \
2179 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2180 set_fprf, Rc(ctx->opcode) != 0); \
2181 }
2182
2183 #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2184 static void gen_f##name(DisasContext *ctx) \
2185 { \
2186 if (unlikely(!ctx->fpu_enabled)) { \
2187 gen_exception(ctx, POWERPC_EXCP_FPU); \
2188 return; \
2189 } \
2190 /* NIP cannot be restored if the memory exception comes from a helper */ \
2191 gen_update_nip(ctx, ctx->nip - 4); \
2192 gen_reset_fpstatus(); \
2193 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2194 cpu_fpr[rB(ctx->opcode)]); \
2195 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2196 set_fprf, Rc(ctx->opcode) != 0); \
2197 }
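
/* These macro families factor out the common FP boilerplate: FPU-enable
 * check, NIP update, fpstatus reset and FPRF/CR1 update.  The isfloat
 * argument appends an frsp so the single-precision variant rounds the
 * double-precision helper result; e.g. GEN_FLOAT_AB(add, ...) below
 * expands to both gen_fadd and gen_fadds, the latter ending with
 * gen_helper_frsp.
 */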
2198
2199 /* fadd - fadds */
2200 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
2201 /* fdiv - fdivs */
2202 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
2203 /* fmul - fmuls */
2204 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
2205
2206 /* fre */
2207 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
2208
2209 /* fres */
2210 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
2211
2212 /* frsqrte */
2213 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2214
2215 /* frsqrtes */
2216 static void gen_frsqrtes(DisasContext *ctx)
2217 {
2218 if (unlikely(!ctx->fpu_enabled)) {
2219 gen_exception(ctx, POWERPC_EXCP_FPU);
2220 return;
2221 }
2222 /* NIP cannot be restored if the memory exception comes from a helper */
2223 gen_update_nip(ctx, ctx->nip - 4);
2224 gen_reset_fpstatus();
2225 gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
2226 cpu_fpr[rB(ctx->opcode)]);
2227 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2228 cpu_fpr[rD(ctx->opcode)]);
2229 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2230 }
2231
2232 /* fsel */
2233 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
2234 /* fsub - fsubs */
2235 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
2236 /* Optional: */
2237
2238 /* fsqrt */
2239 static void gen_fsqrt(DisasContext *ctx)
2240 {
2241 if (unlikely(!ctx->fpu_enabled)) {
2242 gen_exception(ctx, POWERPC_EXCP_FPU);
2243 return;
2244 }
2245 /* NIP cannot be restored if the memory exception comes from a helper */
2246 gen_update_nip(ctx, ctx->nip - 4);
2247 gen_reset_fpstatus();
2248 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2249 cpu_fpr[rB(ctx->opcode)]);
2250 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2251 }
2252
2253 static void gen_fsqrts(DisasContext *ctx)
2254 {
2255 if (unlikely(!ctx->fpu_enabled)) {
2256 gen_exception(ctx, POWERPC_EXCP_FPU);
2257 return;
2258 }
2259 /* NIP cannot be restored if the memory exception comes from a helper */
2260 gen_update_nip(ctx, ctx->nip - 4);
2261 gen_reset_fpstatus();
2262 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2263 cpu_fpr[rB(ctx->opcode)]);
2264 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2265 cpu_fpr[rD(ctx->opcode)]);
2266 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2267 }
2268
2269 /*** Floating-Point multiply-and-add ***/
2270 /* fmadd - fmadds */
2271 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
2272 /* fmsub - fmsubs */
2273 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
2274 /* fnmadd - fnmadds */
2275 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
2276 /* fnmsub - fnmsubs */
2277 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
2278
2279 /*** Floating-Point round & convert ***/
2280 /* fctiw */
2281 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
2282 /* fctiwu */
2283 GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
2284 /* fctiwz */
2285 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
2286 /* fctiwuz */
2287 GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
2288 /* frsp */
2289 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
2290 #if defined(TARGET_PPC64)
2291 /* fcfid */
2292 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
2293 /* fcfids */
2294 GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
2295 /* fcfidu */
2296 GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
2297 /* fcfidus */
2298 GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
2299 /* fctid */
2300 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
2301 /* fctidu */
2302 GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
2303 /* fctidz */
2304 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
2305 /* fctiduz */
2306 GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
2307 #endif
2308
2309 /* frin */
2310 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
2311 /* friz */
2312 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
2313 /* frip */
2314 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
2315 /* frim */
2316 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
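
/* frin/friz/frip/frim round to a double-precision integer value using,
 * respectively, round-to-nearest (ties away from zero), round toward zero,
 * round toward +infinity and round toward -infinity.
 */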
2317
2318 static void gen_ftdiv(DisasContext *ctx)
2319 {
2320 if (unlikely(!ctx->fpu_enabled)) {
2321 gen_exception(ctx, POWERPC_EXCP_FPU);
2322 return;
2323 }
2324 gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2325 cpu_fpr[rB(ctx->opcode)]);
2326 }
2327
2328 static void gen_ftsqrt(DisasContext *ctx)
2329 {
2330 if (unlikely(!ctx->fpu_enabled)) {
2331 gen_exception(ctx, POWERPC_EXCP_FPU);
2332 return;
2333 }
2334 gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2335 }
2336
2337
2338
2339 /*** Floating-Point compare ***/
2340
2341 /* fcmpo */
2342 static void gen_fcmpo(DisasContext *ctx)
2343 {
2344 TCGv_i32 crf;
2345 if (unlikely(!ctx->fpu_enabled)) {
2346 gen_exception(ctx, POWERPC_EXCP_FPU);
2347 return;
2348 }
2349 /* NIP cannot be restored if the memory exception comes from a helper */
2350 gen_update_nip(ctx, ctx->nip - 4);
2351 gen_reset_fpstatus();
2352 crf = tcg_const_i32(crfD(ctx->opcode));
2353 gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
2354 cpu_fpr[rB(ctx->opcode)], crf);
2355 tcg_temp_free_i32(crf);
2356 gen_helper_float_check_status(cpu_env);
2357 }
2358
2359 /* fcmpu */
2360 static void gen_fcmpu(DisasContext *ctx)
2361 {
2362 TCGv_i32 crf;
2363 if (unlikely(!ctx->fpu_enabled)) {
2364 gen_exception(ctx, POWERPC_EXCP_FPU);
2365 return;
2366 }
2367 /* NIP cannot be restored if the memory exception comes from a helper */
2368 gen_update_nip(ctx, ctx->nip - 4);
2369 gen_reset_fpstatus();
2370 crf = tcg_const_i32(crfD(ctx->opcode));
2371 gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
2372 cpu_fpr[rB(ctx->opcode)], crf);
2373 tcg_temp_free_i32(crf);
2374 gen_helper_float_check_status(cpu_env);
2375 }
2376
2377 /*** Floating-point move ***/
2378 /* fabs */
2379 /* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
2380 static void gen_fabs(DisasContext *ctx)
2381 {
2382 if (unlikely(!ctx->fpu_enabled)) {
2383 gen_exception(ctx, POWERPC_EXCP_FPU);
2384 return;
2385 }
2386 tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2387 ~(1ULL << 63));
2388 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2389 }
2390
2391 /* fmr - fmr. */
2392 /* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
2393 static void gen_fmr(DisasContext *ctx)
2394 {
2395 if (unlikely(!ctx->fpu_enabled)) {
2396 gen_exception(ctx, POWERPC_EXCP_FPU);
2397 return;
2398 }
2399 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2400 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2401 }
2402
2403 /* fnabs */
2404 /* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
2405 static void gen_fnabs(DisasContext *ctx)
2406 {
2407 if (unlikely(!ctx->fpu_enabled)) {
2408 gen_exception(ctx, POWERPC_EXCP_FPU);
2409 return;
2410 }
2411 tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2412 1ULL << 63);
2413 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2414 }
2415
2416 /* fneg */
2417 /* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
2418 static void gen_fneg(DisasContext *ctx)
2419 {
2420 if (unlikely(!ctx->fpu_enabled)) {
2421 gen_exception(ctx, POWERPC_EXCP_FPU);
2422 return;
2423 }
2424 tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2425 1ULL << 63);
2426 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2427 }
2428
2429 /* fcpsgn: PowerPC 2.05 specification */
2430 /* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
2431 static void gen_fcpsgn(DisasContext *ctx)
2432 {
2433 if (unlikely(!ctx->fpu_enabled)) {
2434 gen_exception(ctx, POWERPC_EXCP_FPU);
2435 return;
2436 }
2437 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2438 cpu_fpr[rB(ctx->opcode)], 0, 63);
2439 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2440 }
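
/* The deposit takes bits 0..62 (the magnitude) from FRB and keeps bit 63
 * (the sign) from FRA.  Example: FRA = -0.0 (0x8000000000000000) and
 * FRB = +2.0 (0x4000000000000000) yield 0xC000000000000000 = -2.0.
 */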
2441
2442 static void gen_fmrgew(DisasContext *ctx)
2443 {
2444 TCGv_i64 b0;
2445 if (unlikely(!ctx->fpu_enabled)) {
2446 gen_exception(ctx, POWERPC_EXCP_FPU);
2447 return;
2448 }
2449 b0 = tcg_temp_new_i64();
2450 tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
2451 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2452 b0, 0, 32);
2453 tcg_temp_free_i64(b0);
2454 }
2455
2456 static void gen_fmrgow(DisasContext *ctx)
2457 {
2458 if (unlikely(!ctx->fpu_enabled)) {
2459 gen_exception(ctx, POWERPC_EXCP_FPU);
2460 return;
2461 }
2462 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
2463 cpu_fpr[rB(ctx->opcode)],
2464 cpu_fpr[rA(ctx->opcode)],
2465 32, 32);
2466 }
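
/* fmrgew/fmrgow merge the even (high) or odd (low) 32-bit words of the two
 * sources: fmrgew produces { FRA[0:31], FRB[0:31] } via the shift/deposit
 * pair above, fmrgow produces { FRA[32:63], FRB[32:63] } with one deposit.
 */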
2467
2468 /*** Floating-Point status & ctrl register ***/
2469
2470 /* mcrfs */
2471 static void gen_mcrfs(DisasContext *ctx)
2472 {
2473 TCGv tmp = tcg_temp_new();
2474 int bfa;
2475
2476 if (unlikely(!ctx->fpu_enabled)) {
2477 gen_exception(ctx, POWERPC_EXCP_FPU);
2478 return;
2479 }
2480 bfa = 4 * (7 - crfS(ctx->opcode));
2481 tcg_gen_shri_tl(tmp, cpu_fpscr, bfa);
2482 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
2483 tcg_temp_free(tmp);
2484 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2485 tcg_gen_andi_tl(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
2486 }
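
/* Example: mcrfs crf0,7 gives bfa = 0, so the low FPSCR nibble (field 7)
 * is copied into CR field 0; the andi on cpu_fpscr then clears the entire
 * 4-bit field that was copied.
 */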
2487
2488 /* mffs */
2489 static void gen_mffs(DisasContext *ctx)
2490 {
2491 if (unlikely(!ctx->fpu_enabled)) {
2492 gen_exception(ctx, POWERPC_EXCP_FPU);
2493 return;
2494 }
2495 gen_reset_fpstatus();
2496 tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
2497 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2498 }
2499
2500 /* mtfsb0 */
2501 static void gen_mtfsb0(DisasContext *ctx)
2502 {
2503 uint8_t crb;
2504
2505 if (unlikely(!ctx->fpu_enabled)) {
2506 gen_exception(ctx, POWERPC_EXCP_FPU);
2507 return;
2508 }
2509 crb = 31 - crbD(ctx->opcode);
2510 gen_reset_fpstatus();
2511 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
2512 TCGv_i32 t0;
2513 /* NIP cannot be restored if the memory exception comes from a helper */
2514 gen_update_nip(ctx, ctx->nip - 4);
2515 t0 = tcg_const_i32(crb);
2516 gen_helper_fpscr_clrbit(cpu_env, t0);
2517 tcg_temp_free_i32(t0);
2518 }
2519 if (unlikely(Rc(ctx->opcode) != 0)) {
2520 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2521 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2522 }
2523 }
2524
2525 /* mtfsb1 */
2526 static void gen_mtfsb1(DisasContext *ctx)
2527 {
2528 uint8_t crb;
2529
2530 if (unlikely(!ctx->fpu_enabled)) {
2531 gen_exception(ctx, POWERPC_EXCP_FPU);
2532 return;
2533 }
2534 crb = 31 - crbD(ctx->opcode);
2535 gen_reset_fpstatus();
2536 /* XXX: we pretend we can only do IEEE floating-point computations */
2537 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
2538 TCGv_i32 t0;
2539 /* NIP cannot be restored if the memory exception comes from a helper */
2540 gen_update_nip(ctx, ctx->nip - 4);
2541 t0 = tcg_const_i32(crb);
2542 gen_helper_fpscr_setbit(cpu_env, t0);
2543 tcg_temp_free_i32(t0);
2544 }
2545 if (unlikely(Rc(ctx->opcode) != 0)) {
2546 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2547 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2548 }
2549 /* We can raise a deferred exception */
2550 gen_helper_float_check_status(cpu_env);
2551 }
2552
2553 /* mtfsf */
2554 static void gen_mtfsf(DisasContext *ctx)
2555 {
2556 TCGv_i32 t0;
2557 int flm, l, w;
2558
2559 if (unlikely(!ctx->fpu_enabled)) {
2560 gen_exception(ctx, POWERPC_EXCP_FPU);
2561 return;
2562 }
2563 flm = FPFLM(ctx->opcode);
2564 l = FPL(ctx->opcode);
2565 w = FPW(ctx->opcode);
2566 if (unlikely(w && !(ctx->insns_flags2 & PPC2_ISA205))) {
2567 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2568 return;
2569 }
2570 /* NIP cannot be restored if the memory exception comes from an helper */
2571 gen_update_nip(ctx, ctx->nip - 4);
2572 gen_reset_fpstatus();
2573 if (l) {
2574 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
2575 } else {
2576 t0 = tcg_const_i32(flm << (w * 8));
2577 }
2578 gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
2579 tcg_temp_free_i32(t0);
2580 if (unlikely(Rc(ctx->opcode) != 0)) {
2581 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2582 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2583 }
2584 /* We can raise a deferred exception */
2585 gen_helper_float_check_status(cpu_env);
2586 }
2587
2588 /* mtfsfi */
2589 static void gen_mtfsfi(DisasContext *ctx)
2590 {
2591 int bf, sh, w;
2592 TCGv_i64 t0;
2593 TCGv_i32 t1;
2594
2595 if (unlikely(!ctx->fpu_enabled)) {
2596 gen_exception(ctx, POWERPC_EXCP_FPU);
2597 return;
2598 }
2599 w = FPW(ctx->opcode);
2600 bf = FPBF(ctx->opcode);
2601 if (unlikely(w && !(ctx->insns_flags2 & PPC2_ISA205))) {
2602 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2603 return;
2604 }
2605 sh = (8 * w) + 7 - bf;
2606 /* NIP cannot be restored if the memory exception comes from an helper */
2607 gen_update_nip(ctx, ctx->nip - 4);
2608 gen_reset_fpstatus();
2609 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
2610 t1 = tcg_const_i32(1 << sh);
2611 gen_helper_store_fpscr(cpu_env, t0, t1);
2612 tcg_temp_free_i64(t0);
2613 tcg_temp_free_i32(t1);
2614 if (unlikely(Rc(ctx->opcode) != 0)) {
2615 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2616 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2617 }
2618 /* We can raise a deferred exception */
2619 gen_helper_float_check_status(cpu_env);
2620 }
2621
2622 /*** Addressing modes ***/
2623 /* Register indirect with immediate index: EA = (rA|0) + SIMM */
2624 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2625 target_long maskl)
2626 {
2627 target_long simm = SIMM(ctx->opcode);
2628
2629 simm &= ~maskl;
2630 if (rA(ctx->opcode) == 0) {
2631 if (NARROW_MODE(ctx)) {
2632 simm = (uint32_t)simm;
2633 }
2634 tcg_gen_movi_tl(EA, simm);
2635 } else if (likely(simm != 0)) {
2636 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2637 if (NARROW_MODE(ctx)) {
2638 tcg_gen_ext32u_tl(EA, EA);
2639 }
2640 } else {
2641 if (NARROW_MODE(ctx)) {
2642 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2643 } else {
2644 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2645 }
2646 }
2647 }
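
/* maskl clears displacement bits that are reused as sub-opcodes: DS-form
 * loads/stores pass 0x03 and lq passes 0x0F.  With rA = 0 the base reads
 * as zero, not as r0's contents, so e.g. "ld r4,8(r0)" computes EA = 8
 * (truncated to 32 bits in narrow mode).
 */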
2648
2649 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2650 {
2651 if (rA(ctx->opcode) == 0) {
2652 if (NARROW_MODE(ctx)) {
2653 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2654 } else {
2655 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2656 }
2657 } else {
2658 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2659 if (NARROW_MODE(ctx)) {
2660 tcg_gen_ext32u_tl(EA, EA);
2661 }
2662 }
2663 }
2664
2665 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2666 {
2667 if (rA(ctx->opcode) == 0) {
2668 tcg_gen_movi_tl(EA, 0);
2669 } else if (NARROW_MODE(ctx)) {
2670 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2671 } else {
2672 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2673 }
2674 }
2675
2676 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2677 target_long val)
2678 {
2679 tcg_gen_addi_tl(ret, arg1, val);
2680 if (NARROW_MODE(ctx)) {
2681 tcg_gen_ext32u_tl(ret, ret);
2682 }
2683 }
2684
2685 static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
2686 {
2687 int l1 = gen_new_label();
2688 TCGv t0 = tcg_temp_new();
2689 TCGv_i32 t1, t2;
2690 /* NIP cannot be restored if the memory exception comes from a helper */
2691 gen_update_nip(ctx, ctx->nip - 4);
2692 tcg_gen_andi_tl(t0, EA, mask);
2693 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
2694 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
2695 t2 = tcg_const_i32(0);
2696 gen_helper_raise_exception_err(cpu_env, t1, t2);
2697 tcg_temp_free_i32(t1);
2698 tcg_temp_free_i32(t2);
2699 gen_set_label(l1);
2700 tcg_temp_free(t0);
2701 }
2702
2703 /*** Integer load ***/
2704 static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2705 {
2706 tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
2707 }
2708
2709 static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2710 {
2711 TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
2712 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2713 }
2714
2715 static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2716 {
2717 TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
2718 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2719 }
2720
2721 static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2722 {
2723 TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
2724 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2725 }
2726
2727 static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2728 {
2729 TCGv tmp = tcg_temp_new();
2730 gen_qemu_ld32u(ctx, tmp, addr);
2731 tcg_gen_extu_tl_i64(val, tmp);
2732 tcg_temp_free(tmp);
2733 }
2734
2735 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2736 {
2737 TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
2738 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2739 }
2740
2741 static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2742 {
2743 TCGv tmp = tcg_temp_new();
2744 gen_qemu_ld32s(ctx, tmp, addr);
2745 tcg_gen_ext_tl_i64(val, tmp);
2746 tcg_temp_free(tmp);
2747 }
2748
2749 static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2750 {
2751 TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
2752 tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
2753 }
2754
2755 static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
2756 {
2757 tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
2758 }
2759
2760 static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
2761 {
2762 TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
2763 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
2764 }
2765
2766 static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
2767 {
2768 TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
2769 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
2770 }
2771
2772 static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2773 {
2774 TCGv tmp = tcg_temp_new();
2775 tcg_gen_trunc_i64_tl(tmp, val);
2776 gen_qemu_st32(ctx, tmp, addr);
2777 tcg_temp_free(tmp);
2778 }
2779
2780 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2781 {
2782 TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
2783 tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
2784 }
2785
2786 #define GEN_LD(name, ldop, opc, type) \
2787 static void glue(gen_, name)(DisasContext *ctx) \
2788 { \
2789 TCGv EA; \
2790 gen_set_access_type(ctx, ACCESS_INT); \
2791 EA = tcg_temp_new(); \
2792 gen_addr_imm_index(ctx, EA, 0); \
2793 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2794 tcg_temp_free(EA); \
2795 }
2796
2797 #define GEN_LDU(name, ldop, opc, type) \
2798 static void glue(gen_, name##u)(DisasContext *ctx) \
2799 { \
2800 TCGv EA; \
2801 if (unlikely(rA(ctx->opcode) == 0 || \
2802 rA(ctx->opcode) == rD(ctx->opcode))) { \
2803 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2804 return; \
2805 } \
2806 gen_set_access_type(ctx, ACCESS_INT); \
2807 EA = tcg_temp_new(); \
2808 if (type == PPC_64B) \
2809 gen_addr_imm_index(ctx, EA, 0x03); \
2810 else \
2811 gen_addr_imm_index(ctx, EA, 0); \
2812 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2813 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2814 tcg_temp_free(EA); \
2815 }
2816
2817 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2818 static void glue(gen_, name##ux)(DisasContext *ctx) \
2819 { \
2820 TCGv EA; \
2821 if (unlikely(rA(ctx->opcode) == 0 || \
2822 rA(ctx->opcode) == rD(ctx->opcode))) { \
2823 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2824 return; \
2825 } \
2826 gen_set_access_type(ctx, ACCESS_INT); \
2827 EA = tcg_temp_new(); \
2828 gen_addr_reg_index(ctx, EA); \
2829 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2830 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2831 tcg_temp_free(EA); \
2832 }
2833
2834 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2) \
2835 static void glue(gen_, name##x)(DisasContext *ctx) \
2836 { \
2837 TCGv EA; \
2838 gen_set_access_type(ctx, ACCESS_INT); \
2839 EA = tcg_temp_new(); \
2840 gen_addr_reg_index(ctx, EA); \
2841 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2842 tcg_temp_free(EA); \
2843 }
2844 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2845 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)
2846
2847 #define GEN_LDS(name, ldop, op, type) \
2848 GEN_LD(name, ldop, op | 0x20, type); \
2849 GEN_LDU(name, ldop, op | 0x21, type); \
2850 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2851 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2852
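/* Each GEN_LDS below defines the four addressing variants of one load,
 * e.g. GEN_LDS(lbz, ld8u, ...) produces gen_lbz (D-form), gen_lbzu (with
 * update), gen_lbzux (indexed with update) and gen_lbzx (indexed); the
 * update forms reject rA == 0 and rA == rD as invalid.
 */
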
2853 /* lbz lbzu lbzux lbzx */
2854 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2855 /* lha lhau lhaux lhax */
2856 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2857 /* lhz lhzu lhzux lhzx */
2858 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2859 /* lwz lwzu lwzux lwzx */
2860 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2861 #if defined(TARGET_PPC64)
2862 /* lwaux */
2863 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2864 /* lwax */
2865 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2866 /* ldux */
2867 GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
2868 /* ldx */
2869 GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2870
2871 static void gen_ld(DisasContext *ctx)
2872 {
2873 TCGv EA;
2874 if (Rc(ctx->opcode)) {
2875 if (unlikely(rA(ctx->opcode) == 0 ||
2876 rA(ctx->opcode) == rD(ctx->opcode))) {
2877 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2878 return;
2879 }
2880 }
2881 gen_set_access_type(ctx, ACCESS_INT);
2882 EA = tcg_temp_new();
2883 gen_addr_imm_index(ctx, EA, 0x03);
2884 if (ctx->opcode & 0x02) {
2885 /* lwa (lwau is undefined) */
2886 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2887 } else {
2888 /* ld - ldu */
2889 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2890 }
2891 if (Rc(ctx->opcode))
2892 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2893 tcg_temp_free(EA);
2894 }
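
/* DS-form decode: the two low opcode bits select the operation, so bit 1
 * (tested via ctx->opcode & 0x02) distinguishes lwa from ld, while bit 0
 * (Rc) selects the update form ldu; "lwau" does not exist, hence the
 * comment above.
 */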
2895
2896 /* lq */
2897 static void gen_lq(DisasContext *ctx)
2898 {
2899 int ra, rd;
2900 TCGv EA;
2901
2902 /* lq is a legal user mode instruction starting in ISA 2.07 */
2903 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2904 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2905
2906 if (!legal_in_user_mode && is_user_mode(ctx)) {
2907 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2908 return;
2909 }
2910
2911 if (!le_is_supported && ctx->le_mode) {
2912 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2913 return;
2914 }
2915
2916 ra = rA(ctx->opcode);
2917 rd = rD(ctx->opcode);
2918 if (unlikely((rd & 1) || rd == ra)) {
2919 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2920 return;
2921 }
2922
2923 gen_set_access_type(ctx, ACCESS_INT);
2924 EA = tcg_temp_new();
2925 gen_addr_imm_index(ctx, EA, 0x0F);
2926
2927 /* We only need to swap high and low halves. gen_qemu_ld64 does the
2928 necessary 64-bit byteswap already. */
2929 if (unlikely(ctx->le_mode)) {
2930 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2931 gen_addr_add(ctx, EA, EA, 8);
2932 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2933 } else {
2934 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2935 gen_addr_add(ctx, EA, EA, 8);
2936 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2937 }
2938 tcg_temp_free(EA);
2939 }
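
/* Example: in big-endian mode "lq r4,0(r3)" loads the quadword at r3 into
 * the even-odd pair r4:r5 with r4 holding the most-significant doubleword;
 * in little-endian mode only the two doublewords trade places, since
 * gen_qemu_ld64 already byteswaps within each doubleword.
 */
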
2940 #endif
2941
2942 /*** Integer store ***/
2943 #define GEN_ST(name, stop, opc, type) \
2944 static void glue(gen_, name)(DisasContext *ctx) \
2945 { \
2946 TCGv EA; \
2947 gen_set_access_type(ctx, ACCESS_INT); \
2948 EA = tcg_temp_new(); \
2949 gen_addr_imm_index(ctx, EA, 0); \
2950 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2951 tcg_temp_free(EA); \
2952 }
2953
2954 #define GEN_STU(name, stop, opc, type) \
2955 static void glue(gen_, stop##u)(DisasContext *ctx) \
2956 { \
2957 TCGv EA; \
2958 if (unlikely(rA(ctx->opcode) == 0)) { \
2959 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2960 return; \
2961 } \
2962 gen_set_access_type(ctx, ACCESS_INT); \
2963 EA = tcg_temp_new(); \
2964 if (type == PPC_64B) \
2965 gen_addr_imm_index(ctx, EA, 0x03); \
2966 else \
2967 gen_addr_imm_index(ctx, EA, 0); \
2968 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2969 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2970 tcg_temp_free(EA); \
2971 }
2972
2973 #define GEN_STUX(name, stop, opc2, opc3, type) \
2974 static void glue(gen_, name##ux)(DisasContext *ctx) \
2975 { \
2976 TCGv EA; \
2977 if (unlikely(rA(ctx->opcode) == 0)) { \
2978 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2979 return; \
2980 } \
2981 gen_set_access_type(ctx, ACCESS_INT); \
2982 EA = tcg_temp_new(); \
2983 gen_addr_reg_index(ctx, EA); \
2984 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2985 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2986 tcg_temp_free(EA); \
2987 }
2988
2989 #define GEN_STX_E(name, stop, opc2, opc3, type, type2) \
2990 static void glue(gen_, name##x)(DisasContext *ctx) \
2991 { \
2992 TCGv EA; \
2993 gen_set_access_type(ctx, ACCESS_INT); \
2994 EA = tcg_temp_new(); \
2995 gen_addr_reg_index(ctx, EA); \
2996 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2997 tcg_temp_free(EA); \
2998 }
2999 #define GEN_STX(name, stop, opc2, opc3, type) \
3000 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)
3001
3002 #define GEN_STS(name, stop, op, type) \
3003 GEN_ST(name, stop, op | 0x20, type); \
3004 GEN_STU(name, stop, op | 0x21, type); \
3005 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
3006 GEN_STX(name, stop, 0x17, op | 0x00, type)
3007
3008 /* stb stbu stbux stbx */
3009 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
3010 /* sth sthu sthux sthx */
3011 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
3012 /* stw stwu stwux stwx */
3013 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
3014 #if defined(TARGET_PPC64)
3015 GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
3016 GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
3017
3018 static void gen_std(DisasContext *ctx)
3019 {
3020 int rs;
3021 TCGv EA;
3022
3023 rs = rS(ctx->opcode);
3024 if ((ctx->opcode & 0x3) == 0x2) { /* stq */
3025
3026 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
3027 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
3028
3029 if (!legal_in_user_mode && is_user_mode(ctx)) {
3030 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
3031 return;
3032 }
3033
3034 if (!le_is_supported && ctx->le_mode) {
3035 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
3036 return;
3037 }
3038
3039 if (unlikely(rs & 1)) {
3040 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3041 return;
3042 }
3043 gen_set_access_type(ctx, ACCESS_INT);
3044 EA = tcg_temp_new();
3045 gen_addr_imm_index(ctx, EA, 0x03);
3046
3047 /* We only need to swap high and low halves. gen_qemu_st64 does the
3048 necessary 64-bit byteswap already. */
3049 if (unlikely(ctx->le_mode)) {
3050 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
3051 gen_addr_add(ctx, EA, EA, 8);
3052 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3053 } else {
3054 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3055 gen_addr_add(ctx, EA, EA, 8);
3056 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
3057 }
3058 tcg_temp_free(EA);
3059 } else {
3060 /* std / stdu */
3061 if (Rc(ctx->opcode)) {
3062 if (unlikely(rA(ctx->opcode) == 0)) {
3063 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3064 return;
3065 }
3066 }
3067 gen_set_access_type(ctx, ACCESS_INT);
3068 EA = tcg_temp_new();
3069 gen_addr_imm_index(ctx, EA, 0x03);
3070 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3071 if (Rc(ctx->opcode))
3072 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
3073 tcg_temp_free(EA);
3074 }
3075 }
3076 #endif
3077 /*** Integer load and store with byte reverse ***/
3078
3079 /* lhbrx */
3080 static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
3081 {
3082 TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3083 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
3084 }
3085 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
3086
3087 /* lwbrx */
3088 static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
3089 {
3090 TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3091 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
3092 }
3093 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
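
/* XORing the default memop mask with MO_BSWAP flips the access to the
 * opposite of the current guest byte order, so e.g. lwbrx on a big-endian
 * guest performs a little-endian 32-bit load, and vice versa.
 */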
3094
3095 #if defined(TARGET_PPC64)
3096 /* ldbrx */
3097 static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
3098 {
3099 TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3100 tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
3101 }
3102 GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
3103 #endif /* TARGET_PPC64 */
3104
3105 /* sthbrx */