/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instruction classes and implementation flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers                                                  */

/* global register indexes */
static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 10*5 + 22*6 /* VSR */
    + 8*5 /* CRF */];
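/*
 * Buffer sizing, worked out per name (each figure includes the trailing
 * NUL): "r0".."r9" take 3 bytes and "r10".."r31" take 4, hence 10*3 + 22*4
 * for the GPRs; the "rNH" SPE names and "fpN" names are one byte longer,
 * the "avrNH"/"avrNL" names two bytes longer again, "vsrN" takes 5 or 6,
 * and "crf0".."crf7" take 5 bytes each.
 */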
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_reserve;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "exec/gen-icount.h"

void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");

    done_init = 1;
}

/* internal defines */
struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;
    int mem_idx;
    int access_type;
    /* Translation flags */
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
#  define NARROW_MODE(C) (!(C)->sf_mode)
#else
#  define NARROW_MODE(C) 0
#endif

struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;

    /* These are all synchronous exceptions; we set the PC back to
     * the faulting instruction.
     */
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip - 4);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = excp;
}

static void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;

    /* These are all synchronous exceptions; we set the PC back to
     * the faulting instruction.
     */
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip - 4);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = excp;
}

static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
                              target_ulong nip)
{
    TCGv_i32 t0;

    gen_update_nip(ctx, nip);
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = excp;
}

static void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    /* These are all synchronous exceptions; we set the PC back to
     * the faulting instruction.
     */
    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif

#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;

/* Helpers for priv. check */
#define GEN_PRIV                                                \
    do {                                                        \
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
#define CHK_HV GEN_PRIV
#define CHK_SV GEN_PRIV
#define CHK_HVRM GEN_PRIV
#else
#define CHK_HV                                          \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv)) {            \
            GEN_PRIV;                                   \
        }                                               \
    } while (0)
#define CHK_SV                                          \
    do {                                                \
        if (unlikely(ctx->pr)) {                        \
            GEN_PRIV;                                   \
        }                                               \
    } while (0)
#define CHK_HVRM                                        \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV;                                   \
        }                                               \
    } while (0)
#endif

#define CHK_NONE

/*****************************************************************************/
/***                           Instruction decoding                        ***/
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |             \
           ((opcode >> (shift2)) & ((1 << (nb2)) - 1));                       \
}

#define EXTRACT_HELPER_DXFORM(name,                                           \
                              d0_bits, shift_op_d0, shift_d0,                 \
                              d1_bits, shift_op_d1, shift_d1,                 \
                              d2_bits, shift_op_d2, shift_d2)                 \
static inline int16_t name(uint32_t opcode)                                   \
{                                                                             \
    return                                                                    \
        (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) | \
        (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) | \
        (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2));  \
}
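
/*
 * For illustration, EXTRACT_HELPER(opc1, 26, 6) below expands to:
 *
 *     static inline uint32_t opc1(uint32_t opcode)
 *     {
 *         return (opcode >> 26) & 0x3F;
 *     }
 *
 * i.e. the primary opcode living in the six most significant bits of the
 * 32-bit instruction word; the other helpers pick out fields the same way.
 */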

/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Opcode part 4 */
EXTRACT_HELPER(opc4, 16, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/***                               Get CRn                                 ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
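
/*
 * mfspr/mtspr encode the SPR number with its two 5-bit halves swapped, so
 * SPR() swaps them back.  E.g. for LR (SPR 8) the instruction field is
 * _SPR() == 0x100 (8 << 5), and ((0x100 >> 5) & 0x1F) | ((0x100 & 0x1F) << 5)
 * recovers 8.
 */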
/***                             Get constants                             ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* 4 bits unsigned immediate value */
EXTRACT_HELPER(UIMM4, 16, 4);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);

/* addpcis */
EXTRACT_HELPER_DXFORM(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
#if defined(TARGET_PPC64)
/* darn */
EXTRACT_HELPER(L, 16, 2);
#endif

/***                            Jump target decoding                       ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)

EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
EXTRACT_HELPER(IMM8, 11, 8);

/*****************************************************************************/
/* PowerPC instructions table                                                */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif

/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};

/***                           Integer comparison                          ***/

static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
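
/*
 * The target CR field is built up incrementally: SO is copied in first
 * (truncated from cpu_so), then each of the LT/GT/EQ setcond results is
 * shifted to its bit position and OR'ed in.  E.g. a signed compare of
 * 5 against 7 sets only LT, leaving the field as (1 << CRF_LT) | SO.
 */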

static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}

/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}

/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    tcg_gen_shli_i32(crf, crf, CRF_GT);
    tcg_temp_free_i32(src1);
    tcg_temp_free_i32(src2);
    tcg_temp_free_i32(src2lo);
    tcg_temp_free_i32(src2hi);
}
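
/*
 * cmprb tests the low byte of rA against one or two inclusive byte ranges
 * packed into rB: bytes 0 and 1 hold the first range's low and high bounds,
 * bytes 2 and 3 the second range's (only checked when the 0x00200000 "L"
 * bit is set).  The in-range result lands in the GT bit of the CR field.
 */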

#if defined(TARGET_PPC64)
/* cmpeqb */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif

/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}
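
/*
 * In other words, rD = CR[bi] ? rA : rB, with rA == 0 reading as the
 * constant zero rather than GPR 0; e.g. "isel r3,0,r4,2" yields
 * r3 = (CR0.EQ ? 0 : r4).  The movcond tests the isolated CR bit against
 * zero so no branch is generated.
 */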

/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}

/***                           Integer arithmetic                          ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
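
/*
 * Standard signed-overflow derivation: arg0 is the result.  An addition
 * overflows iff the operands have the same sign and the result's sign
 * differs, i.e. (arg0 ^ arg2) & ~(arg1 ^ arg2) has its sign bit set; for
 * subtraction (arg0 = arg2 - arg1) the operands must instead differ in
 * sign, hence the and/andc split above.  The final shift moves that sign
 * bit down into OV, and SO accumulates it.
 */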

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32.  */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
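
/*
 * The narrow-mode carry trick relies on the identity that sum ^ (a ^ b)
 * holds, at each bit position, the carry that came *into* that position:
 * a ^ b is the sum without carries, so XOR-ing it with the real sum leaves
 * exactly the carried-in bits.  Bit 32 of that value is therefore the
 * carry out of bit 31, which is what CA must hold for 32-bit arithmetic.
 */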
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}

/* addpcis */
static void gen_addpcis(DisasContext *ctx)
{
    target_long d = DX(ctx->opcode);

    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->nip + (d << 16));
}

static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
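
/*
 * The setcond/movcond sequence guards the two cases where the host
 * division would trap: a divisor of zero, and INT_MIN / -1 for the signed
 * form.  In either case t2 becomes 1 and the divisor is replaced by that
 * harmless value; the architecture leaves the quotient undefined there,
 * and t2 doubles as the value stored into OV.
 */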
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);

#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif

static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i32 t2 = tcg_const_i32(1);
        TCGv_i32 t3 = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}
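
/*
 * Same guarding idea as the divisions above: a zero divisor (and
 * INT_MIN % -1 for the signed case) would trap on the host, so the
 * divisor is first swapped for a harmless 1.  No OV is computed here;
 * the modulo instructions have no overflow-enabled forms, the result is
 * simply undefined by the architecture in those cases.
 */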

#define GEN_INT_ARITH_MODW(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);

#if defined(TARGET_PPC64)
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t2 = tcg_const_i64(1);
        TCGv_i64 t3 = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

#define GEN_INT_ARITH_MODD(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODD(modud, 0x08, 0);
GEN_INT_ARITH_MODD(modsd, 0x18, 1);
#endif

/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhwu mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mullw mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mullwo mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}

#if defined(TARGET_PPC64)
/* mulhd mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulldo mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif

/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32.  */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);        /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.  */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
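
/*
 * All the paths above compute arg2 - arg1 via the two's-complement
 * identity arg2 - arg1 == ~arg1 + arg2 + 1, which is also how the
 * architecture defines CA for subtraction; with a carry-in the "+ 1"
 * becomes "+ CA", and when CA is not needed the whole thing collapses
 * to a plain subtraction.
 */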
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}

/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}

/***                            Integer logical                            ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
    }                                                                         \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
    }                                                                         \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzw */
static void gen_cnttzw(DisasContext *ctx)
{
    gen_helper_cnttzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /* Pause out of TCG otherwise spin loops with smt_low eat too much
         * CPU and the kernel hangs.  This applies to all encodings other
         * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30),
         * and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode)) {
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    } else {
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}

/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}
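
/*
 * prtyw computes, for each 32-bit word of rS, the XOR of the least
 * significant bits of its four bytes: the two shift/XOR steps fold the
 * upper halfword onto the lower and then byte 1 onto byte 0, and the
 * final mask keeps only bit 0 of each word.
 */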

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzd */
static void gen_cnttzd(DisasContext *ctx)
{
    gen_helper_cnttzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* darn */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l == 0) {
        gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
    } else if (l <= 2) {
        /* Return 64-bit random for both CRN and RRN */
        gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
    } else {
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    }
}
#endif

1924 /*** Integer rotate ***/
1925
1926 /* rlwimi & rlwimi. */
1927 static void gen_rlwimi(DisasContext *ctx)
1928 {
1929 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
1930 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
1931 uint32_t sh = SH(ctx->opcode);
1932 uint32_t mb = MB(ctx->opcode);
1933 uint32_t me = ME(ctx->opcode);
1934
1935 if (sh == (31 - me) && mb <= me) {
1936 tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
1937 } else {
1938 target_ulong mask;
1939 TCGv t1;
1940
1941 #if defined(TARGET_PPC64)
1942 mb += 32;
1943 me += 32;
1944 #endif
1945 mask = MASK(mb, me);
1946
1947 t1 = tcg_temp_new();
1948 if (mask <= 0xffffffffu) {
1949 TCGv_i32 t0 = tcg_temp_new_i32();
1950 tcg_gen_trunc_tl_i32(t0, t_rs);
1951 tcg_gen_rotli_i32(t0, t0, sh);
1952 tcg_gen_extu_i32_tl(t1, t0);
1953 tcg_temp_free_i32(t0);
1954 } else {
1955 #if defined(TARGET_PPC64)
1956 tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
1957 tcg_gen_rotli_i64(t1, t1, sh);
1958 #else
1959 g_assert_not_reached();
1960 #endif
1961 }
1962
1963 tcg_gen_andi_tl(t1, t1, mask);
1964 tcg_gen_andi_tl(t_ra, t_ra, ~mask);
1965 tcg_gen_or_tl(t_ra, t_ra, t1);
1966 tcg_temp_free(t1);
1967 }
1968 if (unlikely(Rc(ctx->opcode) != 0)) {
1969 gen_set_Rc0(ctx, t_ra);
1970 }
1971 }
1972
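/*
 * Editorial sketch, not part of the QEMU source: rlwimi rotates the low
 * 32 bits of rS and inserts the result into rA under MASK(mb, me).  The
 * fast path above recognizes masks that select exactly the contiguous
 * field landing at bit sh and becomes a single deposit; the general path
 * replicates the word into both halves so that a 64-bit rotate behaves
 * like a 32-bit one, a trick gen_rlwinm() and gen_rlwnm() below reuse.
 * A reference version, with the 5-bit rotate amount sh (0..31) and the
 * mask passed in directly:
 */
static inline uint64_t rlwimi_example(uint64_t ra, uint64_t rs,
                                      unsigned sh, uint64_t mask)
{
    uint32_t w = (uint32_t)rs;
    uint64_t rot = (w << sh) | (sh ? w >> (32 - sh) : 0);  /* rotl32 */

    rot |= rot << 32;                     /* replicate into both halves */
    return (rot & mask) | (ra & ~mask);   /* insert under mask */
}
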
1973 /* rlwinm & rlwinm. */
1974 static void gen_rlwinm(DisasContext *ctx)
1975 {
1976 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
1977 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
1978 uint32_t sh = SH(ctx->opcode);
1979 uint32_t mb = MB(ctx->opcode);
1980 uint32_t me = ME(ctx->opcode);
1981
1982 if (mb == 0 && me == (31 - sh)) {
1983 tcg_gen_shli_tl(t_ra, t_rs, sh);
1984 tcg_gen_ext32u_tl(t_ra, t_ra);
1985 } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
1986 tcg_gen_ext32u_tl(t_ra, t_rs);
1987 tcg_gen_shri_tl(t_ra, t_ra, mb);
1988 } else {
1989 target_ulong mask;
1990 #if defined(TARGET_PPC64)
1991 mb += 32;
1992 me += 32;
1993 #endif
1994 mask = MASK(mb, me);
1995
1996 if (mask <= 0xffffffffu) {
1997 TCGv_i32 t0 = tcg_temp_new_i32();
1998 tcg_gen_trunc_tl_i32(t0, t_rs);
1999 tcg_gen_rotli_i32(t0, t0, sh);
2000 tcg_gen_andi_i32(t0, t0, mask);
2001 tcg_gen_extu_i32_tl(t_ra, t0);
2002 tcg_temp_free_i32(t0);
2003 } else {
2004 #if defined(TARGET_PPC64)
2005 tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
2006 tcg_gen_rotli_i64(t_ra, t_ra, sh);
2007 tcg_gen_andi_i64(t_ra, t_ra, mask);
2008 #else
2009 g_assert_not_reached();
2010 #endif
2011 }
2012 }
2013 if (unlikely(Rc(ctx->opcode) != 0)) {
2014 gen_set_Rc0(ctx, t_ra);
2015 }
2016 }
2017
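/*
 * Editorial note, not from the QEMU source: the two special cases above
 * are the standard rotate-and-mask shift identities,
 *
 *   rlwinm ra,rs,sh,0,31-sh  ==  slwi ra,rs,sh  (shift left, zero-extend)
 *   rlwinm ra,rs,32-n,n,31   ==  srwi ra,rs,n   (shift right logical)
 *
 * so no rotate needs to be emitted for them.  gen_rldinm() further down
 * applies the same identities to 64-bit fields.
 */
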
2018 /* rlwnm & rlwnm. */
2019 static void gen_rlwnm(DisasContext *ctx)
2020 {
2021 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2022 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2023 TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2024 uint32_t mb = MB(ctx->opcode);
2025 uint32_t me = ME(ctx->opcode);
2026 target_ulong mask;
2027
2028 #if defined(TARGET_PPC64)
2029 mb += 32;
2030 me += 32;
2031 #endif
2032 mask = MASK(mb, me);
2033
2034 if (mask <= 0xffffffffu) {
2035 TCGv_i32 t0 = tcg_temp_new_i32();
2036 TCGv_i32 t1 = tcg_temp_new_i32();
2037 tcg_gen_trunc_tl_i32(t0, t_rb);
2038 tcg_gen_trunc_tl_i32(t1, t_rs);
2039 tcg_gen_andi_i32(t0, t0, 0x1f);
2040 tcg_gen_rotl_i32(t1, t1, t0);
2041 tcg_gen_extu_i32_tl(t_ra, t1);
2042 tcg_temp_free_i32(t0);
2043 tcg_temp_free_i32(t1);
2044 } else {
2045 #if defined(TARGET_PPC64)
2046 TCGv_i64 t0 = tcg_temp_new_i64();
2047 tcg_gen_andi_i64(t0, t_rb, 0x1f);
2048 tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
2049 tcg_gen_rotl_i64(t_ra, t_ra, t0);
2050 tcg_temp_free_i64(t0);
2051 #else
2052 g_assert_not_reached();
2053 #endif
2054 }
2055
2056 tcg_gen_andi_tl(t_ra, t_ra, mask);
2057
2058 if (unlikely(Rc(ctx->opcode) != 0)) {
2059 gen_set_Rc0(ctx, t_ra);
2060 }
2061 }
2062
2063 #if defined(TARGET_PPC64)
2064 #define GEN_PPC64_R2(name, opc1, opc2) \
2065 static void glue(gen_, name##0)(DisasContext *ctx) \
2066 { \
2067 gen_##name(ctx, 0); \
2068 } \
2069 \
2070 static void glue(gen_, name##1)(DisasContext *ctx) \
2071 { \
2072 gen_##name(ctx, 1); \
2073 }
2074 #define GEN_PPC64_R4(name, opc1, opc2) \
2075 static void glue(gen_, name##0)(DisasContext *ctx) \
2076 { \
2077 gen_##name(ctx, 0, 0); \
2078 } \
2079 \
2080 static void glue(gen_, name##1)(DisasContext *ctx) \
2081 { \
2082 gen_##name(ctx, 0, 1); \
2083 } \
2084 \
2085 static void glue(gen_, name##2)(DisasContext *ctx) \
2086 { \
2087 gen_##name(ctx, 1, 0); \
2088 } \
2089 \
2090 static void glue(gen_, name##3)(DisasContext *ctx) \
2091 { \
2092 gen_##name(ctx, 1, 1); \
2093 }
2094
2095 static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
2096 {
2097 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2098 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2099
2100 if (sh != 0 && mb == 0 && me == (63 - sh)) {
2101 tcg_gen_shli_tl(t_ra, t_rs, sh);
2102 } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
2103 tcg_gen_shri_tl(t_ra, t_rs, mb);
2104 } else {
2105 tcg_gen_rotli_tl(t_ra, t_rs, sh);
2106 tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2107 }
2108 if (unlikely(Rc(ctx->opcode) != 0)) {
2109 gen_set_Rc0(ctx, t_ra);
2110 }
2111 }
2112
2113 /* rldicl - rldicl. */
2114 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2115 {
2116 uint32_t sh, mb;
2117
2118 sh = SH(ctx->opcode) | (shn << 5);
2119 mb = MB(ctx->opcode) | (mbn << 5);
2120 gen_rldinm(ctx, mb, 63, sh);
2121 }
2122 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2123
2124 /* rldicr - rldicr. */
2125 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2126 {
2127 uint32_t sh, me;
2128
2129 sh = SH(ctx->opcode) | (shn << 5);
2130 me = MB(ctx->opcode) | (men << 5);
2131 gen_rldinm(ctx, 0, me, sh);
2132 }
2133 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2134
2135 /* rldic - rldic. */
2136 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2137 {
2138 uint32_t sh, mb;
2139
2140 sh = SH(ctx->opcode) | (shn << 5);
2141 mb = MB(ctx->opcode) | (mbn << 5);
2142 gen_rldinm(ctx, mb, 63 - sh, sh);
2143 }
2144 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2145
2146 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2147 {
2148 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2149 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2150 TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2151 TCGv t0;
2152
2153 t0 = tcg_temp_new();
2154 tcg_gen_andi_tl(t0, t_rb, 0x3f);
2155 tcg_gen_rotl_tl(t_ra, t_rs, t0);
2156 tcg_temp_free(t0);
2157
2158 tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2159 if (unlikely(Rc(ctx->opcode) != 0)) {
2160 gen_set_Rc0(ctx, t_ra);
2161 }
2162 }
2163
2164 /* rldcl - rldcl. */
2165 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2166 {
2167 uint32_t mb;
2168
2169 mb = MB(ctx->opcode) | (mbn << 5);
2170 gen_rldnm(ctx, mb, 63);
2171 }
2172 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2173
2174 /* rldcr - rldcr. */
2175 static inline void gen_rldcr(DisasContext *ctx, int men)
2176 {
2177 uint32_t me;
2178
2179 me = MB(ctx->opcode) | (men << 5);
2180 gen_rldnm(ctx, 0, me);
2181 }
2182 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2183
2184 /* rldimi - rldimi. */
2185 static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
2186 {
2187 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2188 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2189 uint32_t sh = SH(ctx->opcode) | (shn << 5);
2190 uint32_t mb = MB(ctx->opcode) | (mbn << 5);
2191 uint32_t me = 63 - sh;
2192
2193 if (mb <= me) {
2194 tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
2195 } else {
2196 target_ulong mask = MASK(mb, me);
2197 TCGv t1 = tcg_temp_new();
2198
2199 tcg_gen_rotli_tl(t1, t_rs, sh);
2200 tcg_gen_andi_tl(t1, t1, mask);
2201 tcg_gen_andi_tl(t_ra, t_ra, ~mask);
2202 tcg_gen_or_tl(t_ra, t_ra, t1);
2203 tcg_temp_free(t1);
2204 }
2205 if (unlikely(Rc(ctx->opcode) != 0)) {
2206 gen_set_Rc0(ctx, t_ra);
2207 }
2208 }
2209 GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2210 #endif
2211
2212 /*** Integer shift ***/
2213
2214 /* slw & slw. */
2215 static void gen_slw(DisasContext *ctx)
2216 {
2217 TCGv t0, t1;
2218
2219 t0 = tcg_temp_new();
2220 /* AND rS with a mask that is 0 when rB >= 0x20 */
2221 #if defined(TARGET_PPC64)
2222 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2223 tcg_gen_sari_tl(t0, t0, 0x3f);
2224 #else
2225 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2226 tcg_gen_sari_tl(t0, t0, 0x1f);
2227 #endif
2228 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2229 t1 = tcg_temp_new();
2230 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2231 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2232 tcg_temp_free(t1);
2233 tcg_temp_free(t0);
2234 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2235 if (unlikely(Rc(ctx->opcode) != 0))
2236 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2237 }
2238
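/*
 * Editorial sketch, not part of the QEMU source: the shli/sari pair
 * above builds an all-zeros/all-ones mask from bit 5 of rB without a
 * branch - the bit that, when set, means "shift amount >= 32".  The
 * andc then zeroes rS exactly for those out-of-range amounts.  Host C
 * equivalent of the 64-bit mask computation:
 */
static inline uint64_t slw_overshift_mask_example(uint64_t rb)
{
    /* move bit 5 of rb up to bit 63, then replicate it downwards */
    return (uint64_t)((int64_t)(rb << 58) >> 63);   /* 0 or ~0 */
}
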
2239 /* sraw & sraw. */
2240 static void gen_sraw(DisasContext *ctx)
2241 {
2242 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2243 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2244 if (unlikely(Rc(ctx->opcode) != 0))
2245 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2246 }
2247
2248 /* srawi & srawi. */
2249 static void gen_srawi(DisasContext *ctx)
2250 {
2251 int sh = SH(ctx->opcode);
2252 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2253 TCGv src = cpu_gpr[rS(ctx->opcode)];
2254 if (sh == 0) {
2255 tcg_gen_ext32s_tl(dst, src);
2256 tcg_gen_movi_tl(cpu_ca, 0);
2257 } else {
2258 TCGv t0;
2259 tcg_gen_ext32s_tl(dst, src);
2260 tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
2261 t0 = tcg_temp_new();
2262 tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
2263 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2264 tcg_temp_free(t0);
2265 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2266 tcg_gen_sari_tl(dst, dst, sh);
2267 }
2268 if (unlikely(Rc(ctx->opcode) != 0)) {
2269 gen_set_Rc0(ctx, dst);
2270 }
2271 }
2272
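/*
 * Editorial sketch, not part of the QEMU source: for srawi the carry
 * (CA) must be set iff the source word is negative and at least one
 * 1 bit was shifted out, which is what the andi/sari/and/setcond
 * sequence above computes.  Host C equivalent for sh in 1..31 (the
 * sh == 0 case is special-cased above); gen_sradi() below follows the
 * same pattern for 64-bit operands:
 */
static inline int srawi_ca_example(int32_t rs, unsigned sh)
{
    uint32_t lost = (uint32_t)rs & ((1u << sh) - 1);   /* shifted-out bits */

    return (rs < 0) && (lost != 0);                    /* CA */
}
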
2273 /* srw & srw. */
2274 static void gen_srw(DisasContext *ctx)
2275 {
2276 TCGv t0, t1;
2277
2278 t0 = tcg_temp_new();
2279 /* AND rS with a mask that is 0 when rB >= 0x20 */
2280 #if defined(TARGET_PPC64)
2281 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2282 tcg_gen_sari_tl(t0, t0, 0x3f);
2283 #else
2284 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2285 tcg_gen_sari_tl(t0, t0, 0x1f);
2286 #endif
2287 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2288 tcg_gen_ext32u_tl(t0, t0);
2289 t1 = tcg_temp_new();
2290 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2291 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2292 tcg_temp_free(t1);
2293 tcg_temp_free(t0);
2294 if (unlikely(Rc(ctx->opcode) != 0))
2295 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2296 }
2297
2298 #if defined(TARGET_PPC64)
2299 /* sld & sld. */
2300 static void gen_sld(DisasContext *ctx)
2301 {
2302 TCGv t0, t1;
2303
2304 t0 = tcg_temp_new();
2305 /* AND rS with a mask that is 0 when rB >= 0x40 */
2306 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2307 tcg_gen_sari_tl(t0, t0, 0x3f);
2308 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2309 t1 = tcg_temp_new();
2310 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2311 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2312 tcg_temp_free(t1);
2313 tcg_temp_free(t0);
2314 if (unlikely(Rc(ctx->opcode) != 0))
2315 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2316 }
2317
2318 /* srad & srad. */
2319 static void gen_srad(DisasContext *ctx)
2320 {
2321 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2322 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2323 if (unlikely(Rc(ctx->opcode) != 0))
2324 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2325 }
2326 /* sradi & sradi. */
2327 static inline void gen_sradi(DisasContext *ctx, int n)
2328 {
2329 int sh = SH(ctx->opcode) + (n << 5);
2330 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2331 TCGv src = cpu_gpr[rS(ctx->opcode)];
2332 if (sh == 0) {
2333 tcg_gen_mov_tl(dst, src);
2334 tcg_gen_movi_tl(cpu_ca, 0);
2335 } else {
2336 TCGv t0;
2337 tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
2338 t0 = tcg_temp_new();
2339 tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
2340 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2341 tcg_temp_free(t0);
2342 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2343 tcg_gen_sari_tl(dst, src, sh);
2344 }
2345 if (unlikely(Rc(ctx->opcode) != 0)) {
2346 gen_set_Rc0(ctx, dst);
2347 }
2348 }
2349
2350 static void gen_sradi0(DisasContext *ctx)
2351 {
2352 gen_sradi(ctx, 0);
2353 }
2354
2355 static void gen_sradi1(DisasContext *ctx)
2356 {
2357 gen_sradi(ctx, 1);
2358 }
2359
2360 /* extswsli & extswsli. */
2361 static inline void gen_extswsli(DisasContext *ctx, int n)
2362 {
2363 int sh = SH(ctx->opcode) + (n << 5);
2364 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2365 TCGv src = cpu_gpr[rS(ctx->opcode)];
2366
2367 tcg_gen_ext32s_tl(dst, src);
2368 tcg_gen_shli_tl(dst, dst, sh);
2369 if (unlikely(Rc(ctx->opcode) != 0)) {
2370 gen_set_Rc0(ctx, dst);
2371 }
2372 }
2373
2374 static void gen_extswsli0(DisasContext *ctx)
2375 {
2376 gen_extswsli(ctx, 0);
2377 }
2378
2379 static void gen_extswsli1(DisasContext *ctx)
2380 {
2381 gen_extswsli(ctx, 1);
2382 }
2383
2384 /* srd & srd. */
2385 static void gen_srd(DisasContext *ctx)
2386 {
2387 TCGv t0, t1;
2388
2389 t0 = tcg_temp_new();
2390 /* AND rS with a mask that is 0 when rB >= 0x40 */
2391 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2392 tcg_gen_sari_tl(t0, t0, 0x3f);
2393 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2394 t1 = tcg_temp_new();
2395 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2396 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2397 tcg_temp_free(t1);
2398 tcg_temp_free(t0);
2399 if (unlikely(Rc(ctx->opcode) != 0))
2400 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2401 }
2402 #endif
2403
2404 /*** Addressing modes ***/
2405 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2406 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2407 target_long maskl)
2408 {
2409 target_long simm = SIMM(ctx->opcode);
2410
2411 simm &= ~maskl;
2412 if (rA(ctx->opcode) == 0) {
2413 if (NARROW_MODE(ctx)) {
2414 simm = (uint32_t)simm;
2415 }
2416 tcg_gen_movi_tl(EA, simm);
2417 } else if (likely(simm != 0)) {
2418 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2419 if (NARROW_MODE(ctx)) {
2420 tcg_gen_ext32u_tl(EA, EA);
2421 }
2422 } else {
2423 if (NARROW_MODE(ctx)) {
2424 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2425 } else {
2426 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2427 }
2428 }
2429 }
2430
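/*
 * Editorial note, not from the QEMU source: "(rA|0)" is the ISA's
 * notation for "rA, or the constant 0 when the field is 0" - r0 cannot
 * serve as a base register in load/store addressing.  In narrow (32-bit)
 * mode the effective address is additionally truncated to 32 bits, hence
 * the ext32u on every path above: with rA = 0, "lwz rD,-4(0)" yields
 * EA = 0xfffffffc rather than the 64-bit sign extension of -4.
 */
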
2431 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2432 {
2433 if (rA(ctx->opcode) == 0) {
2434 if (NARROW_MODE(ctx)) {
2435 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2436 } else {
2437 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2438 }
2439 } else {
2440 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2441 if (NARROW_MODE(ctx)) {
2442 tcg_gen_ext32u_tl(EA, EA);
2443 }
2444 }
2445 }
2446
2447 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2448 {
2449 if (rA(ctx->opcode) == 0) {
2450 tcg_gen_movi_tl(EA, 0);
2451 } else if (NARROW_MODE(ctx)) {
2452 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2453 } else {
2454 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2455 }
2456 }
2457
2458 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2459 target_long val)
2460 {
2461 tcg_gen_addi_tl(ret, arg1, val);
2462 if (NARROW_MODE(ctx)) {
2463 tcg_gen_ext32u_tl(ret, ret);
2464 }
2465 }
2466
2467 static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
2468 {
2469 TCGLabel *l1 = gen_new_label();
2470 TCGv t0 = tcg_temp_new();
2471 TCGv_i32 t1, t2;
2472 tcg_gen_andi_tl(t0, EA, mask);
2473 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
2474 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
2475 t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
2476 gen_update_nip(ctx, ctx->nip - 4);
2477 gen_helper_raise_exception_err(cpu_env, t1, t2);
2478 tcg_temp_free_i32(t1);
2479 tcg_temp_free_i32(t2);
2480 gen_set_label(l1);
2481 tcg_temp_free(t0);
2482 }
2483
2484 static inline void gen_align_no_le(DisasContext *ctx)
2485 {
2486 gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
2487 (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
2488 }
2489
2490 /*** Integer load ***/
2491 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
2492 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
2493
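/*
 * Editorial note, not from the QEMU source: default_tcg_memop_mask
 * encodes the guest's current byte order (MO_LE or MO_BE), so DEF_MEMOP()
 * produces an access in that order while BSWAP_MEMOP(), by XORing in
 * MO_BSWAP, produces the opposite order - which is what the byte-reversed
 * lhbrx/lwbrx/sthbrx/stwbrx family below needs in either endian mode.
 */
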
2494 #define GEN_QEMU_LOAD_TL(ldop, op) \
2495 static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
2496 TCGv val, \
2497 TCGv addr) \
2498 { \
2499 tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
2500 }
2501
2502 GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
2503 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
2504 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
2505 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
2506 GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
2507
2508 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
2509 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
2510
2511 #define GEN_QEMU_LOAD_64(ldop, op) \
2512 static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
2513 TCGv_i64 val, \
2514 TCGv addr) \
2515 { \
2516 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
2517 }
2518
2519 GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
2520 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
2521 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
2522 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
2523 GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
2524
2525 #if defined(TARGET_PPC64)
2526 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
2527 #endif
2528
2529 #define GEN_QEMU_STORE_TL(stop, op) \
2530 static void glue(gen_qemu_, stop)(DisasContext *ctx, \
2531 TCGv val, \
2532 TCGv addr) \
2533 { \
2534 tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
2535 }
2536
2537 GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
2538 GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
2539 GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
2540
2541 GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
2542 GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
2543
2544 #define GEN_QEMU_STORE_64(stop, op) \
2545 static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
2546 TCGv_i64 val, \
2547 TCGv addr) \
2548 { \
2549 tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
2550 }
2551
2552 GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
2553 GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
2554 GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
2555 GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
2556
2557 #if defined(TARGET_PPC64)
2558 GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
2559 #endif
2560
2561 #define GEN_LD(name, ldop, opc, type) \
2562 static void glue(gen_, name)(DisasContext *ctx) \
2563 { \
2564 TCGv EA; \
2565 gen_set_access_type(ctx, ACCESS_INT); \
2566 EA = tcg_temp_new(); \
2567 gen_addr_imm_index(ctx, EA, 0); \
2568 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2569 tcg_temp_free(EA); \
2570 }
2571
2572 #define GEN_LDU(name, ldop, opc, type) \
2573 static void glue(gen_, name##u)(DisasContext *ctx) \
2574 { \
2575 TCGv EA; \
2576 if (unlikely(rA(ctx->opcode) == 0 || \
2577 rA(ctx->opcode) == rD(ctx->opcode))) { \
2578 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2579 return; \
2580 } \
2581 gen_set_access_type(ctx, ACCESS_INT); \
2582 EA = tcg_temp_new(); \
2583 if (type == PPC_64B) \
2584 gen_addr_imm_index(ctx, EA, 0x03); \
2585 else \
2586 gen_addr_imm_index(ctx, EA, 0); \
2587 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2588 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2589 tcg_temp_free(EA); \
2590 }
2591
2592 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2593 static void glue(gen_, name##ux)(DisasContext *ctx) \
2594 { \
2595 TCGv EA; \
2596 if (unlikely(rA(ctx->opcode) == 0 || \
2597 rA(ctx->opcode) == rD(ctx->opcode))) { \
2598 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2599 return; \
2600 } \
2601 gen_set_access_type(ctx, ACCESS_INT); \
2602 EA = tcg_temp_new(); \
2603 gen_addr_reg_index(ctx, EA); \
2604 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2605 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2606 tcg_temp_free(EA); \
2607 }
2608
2609 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
2610 static void glue(gen_, name##x)(DisasContext *ctx) \
2611 { \
2612 TCGv EA; \
2613 chk; \
2614 gen_set_access_type(ctx, ACCESS_INT); \
2615 EA = tcg_temp_new(); \
2616 gen_addr_reg_index(ctx, EA); \
2617 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2618 tcg_temp_free(EA); \
2619 }
2620
2621 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2622 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)
2623
2624 #define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \
2625 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
2626
2627 #define GEN_LDS(name, ldop, op, type) \
2628 GEN_LD(name, ldop, op | 0x20, type); \
2629 GEN_LDU(name, ldop, op | 0x21, type); \
2630 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2631 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2632
2633 /* lbz lbzu lbzux lbzx */
2634 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2635 /* lha lhau lhaux lhax */
2636 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2637 /* lhz lhzu lhzux lhzx */
2638 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2639 /* lwz lwzu lwzux lwzx */
2640 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2641 #if defined(TARGET_PPC64)
2642 /* lwaux */
2643 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2644 /* lwax */
2645 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2646 /* ldux */
2647 GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
2648 /* ldx */
2649 GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
2650
2651 /* CI load/store variants */
2652 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
2653 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
2654 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
2655 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
2656
2657 static void gen_ld(DisasContext *ctx)
2658 {
2659 TCGv EA;
2660 if (Rc(ctx->opcode)) {
2661 if (unlikely(rA(ctx->opcode) == 0 ||
2662 rA(ctx->opcode) == rD(ctx->opcode))) {
2663 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2664 return;
2665 }
2666 }
2667 gen_set_access_type(ctx, ACCESS_INT);
2668 EA = tcg_temp_new();
2669 gen_addr_imm_index(ctx, EA, 0x03);
2670 if (ctx->opcode & 0x02) {
2671 /* lwa (lwau is undefined) */
2672 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2673 } else {
2674 /* ld - ldu */
2675 gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2676 }
2677 if (Rc(ctx->opcode))
2678 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2679 tcg_temp_free(EA);
2680 }
2681
2682 /* lq */
2683 static void gen_lq(DisasContext *ctx)
2684 {
2685 int ra, rd;
2686 TCGv EA;
2687
2688 /* lq is a legal user mode instruction starting in ISA 2.07; the same revision also made it legal in little-endian mode */
2689 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2690 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2691
2692 if (!legal_in_user_mode && ctx->pr) {
2693 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2694 return;
2695 }
2696
2697 if (!le_is_supported && ctx->le_mode) {
2698 gen_align_no_le(ctx);
2699 return;
2700 }
2701 ra = rA(ctx->opcode);
2702 rd = rD(ctx->opcode);
2703 if (unlikely((rd & 1) || rd == ra)) {
2704 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2705 return;
2706 }
2707
2708 gen_set_access_type(ctx, ACCESS_INT);
2709 EA = tcg_temp_new();
2710 gen_addr_imm_index(ctx, EA, 0x0F);
2711
2712 /* We only need to swap high and low halves. gen_qemu_ld64_i64 does the
2713 necessary 64-bit byteswap already. */
2714 if (unlikely(ctx->le_mode)) {
2715 gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
2716 gen_addr_add(ctx, EA, EA, 8);
2717 gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
2718 } else {
2719 gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
2720 gen_addr_add(ctx, EA, EA, 8);
2721 gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
2722 }
2723 tcg_temp_free(EA);
2724 }
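
/*
 * Editorial note, not from the QEMU source: in little-endian mode the
 * two doublewords of the quadword swap places, so gen_lq() loads rD+1
 * from the lower address first; the bytes within each doubleword are
 * already handled by gen_qemu_ld64_i64.  For a 16-byte-aligned EA:
 *
 *   big-endian:     rD   = mem[EA..EA+7]   rD+1 = mem[EA+8..EA+15]
 *   little-endian:  rD+1 = mem[EA..EA+7]   rD   = mem[EA+8..EA+15]
 *
 * gen_std() further down mirrors this layout for stq.
 */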
2725 #endif
2726
2727 /*** Integer store ***/
2728 #define GEN_ST(name, stop, opc, type) \
2729 static void glue(gen_, name)(DisasContext *ctx) \
2730 { \
2731 TCGv EA; \
2732 gen_set_access_type(ctx, ACCESS_INT); \
2733 EA = tcg_temp_new(); \
2734 gen_addr_imm_index(ctx, EA, 0); \
2735 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2736 tcg_temp_free(EA); \
2737 }
2738
2739 #define GEN_STU(name, stop, opc, type) \
2740 static void glue(gen_, stop##u)(DisasContext *ctx) \
2741 { \
2742 TCGv EA; \
2743 if (unlikely(rA(ctx->opcode) == 0)) { \
2744 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2745 return; \
2746 } \
2747 gen_set_access_type(ctx, ACCESS_INT); \
2748 EA = tcg_temp_new(); \
2749 if (type == PPC_64B) \
2750 gen_addr_imm_index(ctx, EA, 0x03); \
2751 else \
2752 gen_addr_imm_index(ctx, EA, 0); \
2753 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2754 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2755 tcg_temp_free(EA); \
2756 }
2757
2758 #define GEN_STUX(name, stop, opc2, opc3, type) \
2759 static void glue(gen_, name##ux)(DisasContext *ctx) \
2760 { \
2761 TCGv EA; \
2762 if (unlikely(rA(ctx->opcode) == 0)) { \
2763 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2764 return; \
2765 } \
2766 gen_set_access_type(ctx, ACCESS_INT); \
2767 EA = tcg_temp_new(); \
2768 gen_addr_reg_index(ctx, EA); \
2769 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2770 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2771 tcg_temp_free(EA); \
2772 }
2773
2774 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
2775 static void glue(gen_, name##x)(DisasContext *ctx) \
2776 { \
2777 TCGv EA; \
2778 chk; \
2779 gen_set_access_type(ctx, ACCESS_INT); \
2780 EA = tcg_temp_new(); \
2781 gen_addr_reg_index(ctx, EA); \
2782 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2783 tcg_temp_free(EA); \
2784 }
2785 #define GEN_STX(name, stop, opc2, opc3, type) \
2786 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
2787
2788 #define GEN_STX_HVRM(name, stop, opc2, opc3, type) \
2789 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
2790
2791 #define GEN_STS(name, stop, op, type) \
2792 GEN_ST(name, stop, op | 0x20, type); \
2793 GEN_STU(name, stop, op | 0x21, type); \
2794 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
2795 GEN_STX(name, stop, 0x17, op | 0x00, type)
2796
2797 /* stb stbu stbux stbx */
2798 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
2799 /* sth sthu sthux sthx */
2800 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
2801 /* stw stwu stwux stwx */
2802 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
2803 #if defined(TARGET_PPC64)
2804 GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
2805 GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
2806 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
2807 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
2808 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
2809 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
2810
2811 static void gen_std(DisasContext *ctx)
2812 {
2813 int rs;
2814 TCGv EA;
2815
2816 rs = rS(ctx->opcode);
2817 if ((ctx->opcode & 0x3) == 0x2) { /* stq */
2818 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2819 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2820
2821 if (!(ctx->insns_flags & PPC_64BX)) {
2822 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2823 return;
2824 }
2825 if (!legal_in_user_mode && ctx->pr) {
2826 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2827 return;
2828 }
2829
2830 if (!le_is_supported && ctx->le_mode) {
2831 gen_align_no_le(ctx);
2832 return;
2833 }
2834
2835 if (unlikely(rs & 1)) {
2836 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2837 return;
2838 }
2839 gen_set_access_type(ctx, ACCESS_INT);
2840 EA = tcg_temp_new();
2841 gen_addr_imm_index(ctx, EA, 0x03);
2842
2843 /* We only need to swap high and low halves. gen_qemu_st64_i64 does the
2844 necessary 64-bit byteswap already. */
2845 if (unlikely(ctx->le_mode)) {
2846 gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
2847 gen_addr_add(ctx, EA, EA, 8);
2848 gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
2849 } else {
2850 gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
2851 gen_addr_add(ctx, EA, EA, 8);
2852 gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
2853 }
2854 tcg_temp_free(EA);
2855 } else {
2856 /* std / stdu */
2857 if (Rc(ctx->opcode)) {
2858 if (unlikely(rA(ctx->opcode) == 0)) {
2859 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2860 return;
2861 }
2862 }
2863 gen_set_access_type(ctx, ACCESS_INT);
2864 EA = tcg_temp_new();
2865 gen_addr_imm_index(ctx, EA, 0x03);
2866 gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
2867 if (Rc(ctx->opcode))
2868 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2869 tcg_temp_free(EA);
2870 }
2871 }
2872 #endif
2873 /*** Integer load and store with byte reverse ***/
2874
2875 /* lhbrx */
2876 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
2877
2878 /* lwbrx */
2879 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
2880
2881 #if defined(TARGET_PPC64)
2882 /* ldbrx */
2883 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
2884 /* stdbrx */
2885 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
2886 #endif /* TARGET_PPC64 */
2887
2888 /* sthbrx */
2889 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
2890 /* stwbrx */
2891 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
2892
2893 /*** Integer load and store multiple ***/
2894
2895 /* lmw */
2896 static void gen_lmw(DisasContext *ctx)
2897 {
2898 TCGv t0;
2899 TCGv_i32 t1;
2900
2901 if (ctx->le_mode) {
2902 gen_align_no_le(ctx);
2903 return;
2904 }
2905 gen_set_access_type(ctx, ACCESS_INT);
2906 t0 = tcg_temp_new();
2907 t1 = tcg_const_i32(rD(ctx->opcode));
2908 gen_addr_imm_index(ctx, t0, 0);
2909 gen_helper_lmw(cpu_env, t0, t1);
2910 tcg_temp_free(t0);
2911 tcg_temp_free_i32(t1);
2912 }
2913
2914 /* stmw */
2915 static void gen_stmw(DisasContext *ctx)
2916 {
2917 TCGv t0;
2918 TCGv_i32 t1;
2919
2920 if (ctx->le_mode) {
2921 gen_align_no_le(ctx);
2922 return;
2923 }
2924 gen_set_access_type(ctx, ACCESS_INT);
2925 t0 = tcg_temp_new();
2926 t1 = tcg_const_i32(rS(ctx->opcode));
2927 gen_addr_imm_index(ctx, t0, 0);
2928 gen_helper_stmw(cpu_env, t0, t1);
2929 tcg_temp_free(t0);
2930 tcg_temp_free_i32(t1);
2931 }
2932
2933 /*** Integer load and store strings ***/
2934
2935 /* lswi */
2936 /* PowerPC32 specification says we must generate an exception if
2937 * rA is in the range of registers to be loaded.
2938 * On the other hand, IBM says this is valid, but rA won't be loaded.
2939 * For now, I'll follow the spec...
2940 */
2941 static void gen_lswi(DisasContext *ctx)
2942 {
2943 TCGv t0;
2944 TCGv_i32 t1, t2;
2945 int nb = NB(ctx->opcode);
2946 int start = rD(ctx->opcode);
2947 int ra = rA(ctx->opcode);
2948 int nr;
2949
2950 if (ctx->le_mode) {
2951 gen_align_no_le(ctx);
2952 return;
2953 }
2954 if (nb == 0)
2955 nb = 32;
2956 nr = (nb + 3) / 4;
2957 if (unlikely(lsw_reg_in_range(start, nr, ra))) {
2958 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
2959 return;
2960 }
2961 gen_set_access_type(ctx, ACCESS_INT);
2962 t0 = tcg_temp_new();
2963 gen_addr_register(ctx, t0);
2964 t1 = tcg_const_i32(nb);
2965 t2 = tcg_const_i32(start);
2966 gen_helper_lsw(cpu_env, t0, t1, t2);
2967 tcg_temp_free(t0);
2968 tcg_temp_free_i32(t1);
2969 tcg_temp_free_i32(t2);
2970 }
2971
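/*
 * Editorial sketch, not part of the QEMU source: lswi fills
 * nr = ceil(nb / 4) consecutive registers starting at rD, wrapping from
 * r31 back to r0, so the invalid-form test must handle the wrap.  A
 * reference version of the decision lsw_reg_in_range() has to make:
 */
static inline int lswi_ra_in_range_example(int start, int nr, int ra)
{
    /* registers touched: start, start+1, ..., start+nr-1 (mod 32) */
    if (start + nr <= 32) {
        return ra >= start && ra < start + nr;        /* no wrap */
    }
    return ra >= start || ra < (start + nr) % 32;     /* wrapped range */
}
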
2972 /* lswx */
2973 static void gen_lswx(DisasContext *ctx)
2974 {
2975 TCGv t0;
2976 TCGv_i32 t1, t2, t3;
2977
2978 if (ctx->le_mode) {
2979 gen_align_no_le(ctx);
2980 return;
2981 }
2982 gen_set_access_type(ctx, ACCESS_INT);
2983 t0 = tcg_temp_new();
2984 gen_addr_reg_index(ctx, t0);
2985 t1 = tcg_const_i32(rD(ctx->opcode));
2986 t2 = tcg_const_i32(rA(ctx->opcode));
2987 t3 = tcg_const_i32(rB(ctx->opcode));
2988 gen_helper_lswx(cpu_env, t0, t1, t2, t3);
2989 tcg_temp_free(t0);
2990 tcg_temp_free_i32(t1);
2991 tcg_temp_free_i32(t2);
2992 tcg_temp_free_i32(t3);
2993 }
2994
2995 /* stswi */
2996 static void gen_stswi(DisasContext *ctx)
2997 {
2998 TCGv t0;
2999 TCGv_i32 t1, t2;
3000 int nb = NB(ctx->opcode);
3001
3002 if (ctx->le_mode) {
3003 gen_align_no_le(ctx);
3004 return;
3005 }
3006 gen_set_access_type(ctx, ACCESS_INT);
3007 t0 = tcg_temp_new();
3008 gen_addr_register(ctx, t0);
3009 if (nb == 0)
3010 nb = 32;
3011 t1 = tcg_const_i32(nb);
3012 t2 = tcg_const_i32(rS(ctx->opcode));
3013 gen_helper_stsw(cpu_env, t0, t1, t2);
3014 tcg_temp_free(t0);
3015 tcg_temp_free_i32(t1);
3016 tcg_temp_free_i32(t2);
3017 }
3018
3019 /* stswx */
3020 static void gen_stswx(DisasContext *ctx)
3021 {
3022 TCGv t0;
3023 TCGv_i32 t1, t2;
3024
3025 if (ctx->le_mode) {
3026 gen_align_no_le(ctx);
3027 return;
3028 }
3029 gen_set_access_type(ctx, ACCESS_INT);
3030 t0 = tcg_temp_new();
3031 gen_addr_reg_index(ctx, t0);
3032 t1 = tcg_temp_new_i32();
3033 tcg_gen_trunc_tl_i32(t1, cpu_xer);
3034 tcg_gen_andi_i32(t1, t1, 0x7F);
3035 t2 = tcg_const_i32(rS(ctx->opcode));
3036 gen_helper_stsw(cpu_env, t0, t1, t2);
3037 tcg_temp_free(t0);
3038 tcg_temp_free_i32(t1);
3039 tcg_temp_free_i32(t2);
3040 }
3041
3042 /*** Memory synchronisation ***/
3043 /* eieio */
3044 static void gen_eieio(DisasContext *ctx)
3045 {
3046 }
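
/*
 * Editorial note, not from the QEMU source: eieio orders storage
 * accesses, but TCG currently executes a guest's memory operations in
 * program order on a single thread, so there is nothing to emit here
 * and the empty body appears to be intentional.
 */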
3047
3048 #if !defined(CONFIG_USER_ONLY)
3049 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
3050 {
3051 TCGv_i32 t;
3052 TCGLabel *l;
3053
3054 if (!ctx->lazy_tlb_flush) {
3055 return;
3056 }
3057 l = gen_new_label();
3058 t = tcg_temp_new_i32();
3059 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
3060 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
3061 if (global) {
3062 gen_helper_check_tlb_flush_global(cpu_env);
3063 } else {
3064 gen_helper_check_tlb_flush_local(cpu_env);
3065 }
3066 gen_set_label(l);
3067 tcg_temp_free_i32(t);
3068 }
3069 #else
3070 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
3071 #endif
3072
3073 /* isync */
3074 static void gen_isync(DisasContext *ctx)
3075 {
3076 /*
3077 * We need to check for a pending TLB flush. This can only happen in
3078 * kernel mode, however, so check MSR_PR.
3079 */
3080 if (!ctx->pr) {
3081 gen_check_tlb_flush(ctx, false);
3082 }
3083 gen_stop_exception(ctx);
3084 }
3085
3086 #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
3087
3088 #define LARX(name, memop) \
3089 static void gen_##name(DisasContext *ctx) \
3090 { \
3091 TCGv t0; \
3092 TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
3093 int len = MEMOP_GET_SIZE(memop); \
3094 gen_set_access_type(ctx, ACCESS_RES); \
3095 t0 = tcg_temp_local_new(); \
3096 gen_addr_reg_index(ctx, t0); \
3097 if ((len) > 1) { \
3098 gen_check_align(ctx, t0, (len)-1); \
3099 } \
3100 tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
3101 tcg_gen_mov_tl(cpu_reserve, t0); \
3102 tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
3103 tcg_temp_free(t0); \
3104 }
3105
3106 /* lbarx, lharx & lwarx */
3107 LARX(lbarx, DEF_MEMOP(MO_UB))
3108 LARX(lharx, DEF_MEMOP(MO_UW))
3109 LARX(lwarx, DEF_MEMOP(MO_UL))
3110
3111 #if defined(CONFIG_USER_ONLY)
3112 static void gen_conditional_store(DisasContext *ctx, TCGv EA,
3113 int reg, int memop)
3114 {
3115 TCGv t0 = tcg_temp_new();
3116
3117 tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
3118 tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
3119 tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
3120 tcg_temp_free(t0);
3121 gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
3122 }
3123 #else
3124 static void gen_conditional_store(DisasContext *ctx, TCGv EA,
3125 int reg, int memop)
3126 {
3127 TCGLabel *l1;
3128
3129 tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
3130 l1 = gen_new_label();
3131 tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
3132 tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
3133 tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
3134 gen_set_label(l1);
3135 tcg_gen_movi_tl(cpu_reserve, -1);
3136 }
3137 #endif
3138
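/*
 * Editorial sketch, not part of the QEMU source: the system-mode
 * gen_conditional_store() above models store-conditional by comparing
 * the EA against the address saved by the matching load-and-reserve
 * (note: only the address - real hardware tracks a reservation granule).
 * Host C equivalent of the emitted logic, with names invented for this
 * sketch; CRF_EQ is bit 1 of the CR field:
 */
static inline uint32_t stcx_example(uint64_t ea, uint64_t *reserve_addr,
                                    uint32_t so)
{
    uint32_t cr0 = so;              /* SO is copied in from XER */

    if (ea == *reserve_addr) {
        /* the real code performs the guest store at this point */
        cr0 |= 1u << 1;             /* 1 << CRF_EQ: success */
    }
    *reserve_addr = (uint64_t)-1;   /* the reservation is always lost */
    return cr0;
}
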
3139 #define STCX(name, memop) \
3140 static void gen_##name(DisasContext *ctx) \
3141 { \
3142 TCGv t0; \
3143 int len = MEMOP_GET_SIZE(memop); \
3144 gen_set_access_type(ctx, ACCESS_RES); \
3145 t0 = tcg_temp_local_new(); \
3146 gen_addr_reg_index(ctx, t0); \
3147 if (len > 1) { \
3148 gen_check_align(ctx, t0, (len) - 1); \
3149 } \
3150 gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
3151 tcg_temp_free(t0); \
3152 }
3153
3154 STCX(stbcx_, DEF_MEMOP(MO_UB))
3155 STCX(sthcx_, DEF_MEMOP(MO_UW))
3156 STCX(stwcx_, DEF_MEMOP(MO_UL))
3157
3158 #if defined(TARGET_PPC64)
3159 /* ldarx */
3160 LARX(ldarx, DEF_MEMOP(MO_Q))
3161 /* stdcx. */
3162 STCX(stdcx_, DEF_MEMOP(MO_Q))
3163
3164 /* lqarx */
3165 static void gen_lqarx(DisasContext *ctx)
3166 {
3167 TCGv EA;
3168 int rd =