/* qemu.git — target-ppc/translate.c */
1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/host-utils.h"
27 #include "exec/cpu_ldst.h"
28
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34
35
36 #define CPU_SINGLE_STEP 0x1
37 #define CPU_BRANCH_STEP 0x2
38 #define GDBSTUB_SINGLE_STEP 0x4
39
40 /* Include definitions for instructions classes and implementations flags */
41 //#define PPC_DEBUG_DISAS
42 //#define DO_PPC_STATISTICS
43
44 #ifdef PPC_DEBUG_DISAS
45 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
46 #else
47 # define LOG_DISAS(...) do { } while (0)
48 #endif
49 /*****************************************************************************/
50 /* Code translation helpers */
51
/* global register indexes */
static TCGv_env cpu_env;
/* Backing store for every TCG global register name below.  The sizing
   arithmetic accounts for one-digit vs two-digit register numbers plus
   the NUL terminator; ppc_translate_init() carves names out of it with
   matching "p +=" increments. */
static char cpu_reg_names[10*3 + 22*4 /* GPR */
    + 10*4 + 22*5 /* SPE GPRh */
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 10*5 + 22*6 /* VSR */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];        /* general purpose registers */
static TCGv cpu_gprh[32];       /* upper GPR halves (SPE) */
static TCGv_i64 cpu_fpr[32];    /* floating point registers */
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];  /* Altivec vector halves */
static TCGv_i64 cpu_vsr[32];    /* VSX registers */
static TCGv_i32 cpu_crf[8];     /* condition register fields */
static TCGv cpu_nip;            /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_reserve;        /* lwarx/stwcx. reservation address */
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
77
78 #include "exec/gen-icount.h"
79
/*
 * Create the TCG globals for the PowerPC CPU state.  Called once per
 * process; subsequent calls return immediately.
 *
 * Register names live in the static cpu_reg_names buffer.  Every
 * "p += N" / "cpu_reg_names_size -= N" pair must equal the length of
 * the name just formatted, including its NUL terminator, and must
 * mirror the sizing arithmetic in the buffer's declaration above.
 */
void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5; /* "crfN" + NUL */
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        /* The Altivec register halves are stored so that u64[0]/u64[1]
           map onto the architectural high/low half depending on host
           endianness. */
        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type), "access_type");

    done_init = 1;
}
187
/* internal defines */
/* Per-translation-block decoder state, threaded through every gen_*
   function while a TB is being translated. */
struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;           /* address of the insn being translated */
    uint32_t opcode;            /* raw 32-bit instruction word */
    uint32_t exception;         /* pending exception, POWERPC_EXCP_NONE if none */
    /* Routine used to access memory */
    bool pr, hv;                /* MSR problem-state / hypervisor bits */
    int mem_idx;
    int access_type;            /* last value written to cpu_access_type */
    /* Translation flags */
    int le_mode;                /* guest is running little-endian */
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    int sf_mode;                /* 64-bit mode active (MSR[SF]) */
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int vsx_enabled;
    int spe_enabled;
    int tm_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;       /* PPC_* instruction-set feature bits */
    uint64_t insns_flags2;
};
215
216 /* Return true iff byteswap is needed in a scalar memop */
217 static inline bool need_byteswap(const DisasContext *ctx)
218 {
219 #if defined(TARGET_WORDS_BIGENDIAN)
220 return ctx->le_mode;
221 #else
222 return !ctx->le_mode;
223 #endif
224 }
225
/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif

/* Per-opcode decode/dispatch entry; one of these sits behind every
   slot of the opcode tables built with the GEN_OPCODE macros below. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};
251
/* Emit a call clearing the accumulated FP status flags. */
static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

/* Emit calls that set FPSCR[FPRF] from arg and then check/raise any
   pending floating-point exceptions. */
static inline void gen_compute_fprf(TCGv_i64 arg)
{
    gen_helper_compute_fprf(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}
262
/* Record the current memory access type in env, but only emit the
   store when it actually changes (ctx caches the last value). */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

/* Store nip into env->nip, truncated to 32 bits in narrow mode. */
static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}
278
/* External entry point (void* opaque for callers without the type):
   flush the translator's current nip into env->nip. */
void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}
285
286 static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
287 {
288 TCGv_i32 t0, t1;
289 if (ctx->exception == POWERPC_EXCP_NONE) {
290 gen_update_nip(ctx, ctx->nip);
291 }
292 t0 = tcg_const_i32(excp);
293 t1 = tcg_const_i32(error);
294 gen_helper_raise_exception_err(cpu_env, t0, t1);
295 tcg_temp_free_i32(t0);
296 tcg_temp_free_i32(t1);
297 ctx->exception = (excp);
298 }
299
300 static inline void gen_exception(DisasContext *ctx, uint32_t excp)
301 {
302 TCGv_i32 t0;
303 if (ctx->exception == POWERPC_EXCP_NONE) {
304 gen_update_nip(ctx, ctx->nip);
305 }
306 t0 = tcg_const_i32(excp);
307 gen_helper_raise_exception(cpu_env, t0);
308 tcg_temp_free_i32(t0);
309 ctx->exception = (excp);
310 }
311
312 static inline void gen_debug_exception(DisasContext *ctx)
313 {
314 TCGv_i32 t0;
315
316 if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
317 (ctx->exception != POWERPC_EXCP_SYNC)) {
318 gen_update_nip(ctx, ctx->nip);
319 }
320 t0 = tcg_const_i32(EXCP_DEBUG);
321 gen_helper_raise_exception(cpu_env, t0);
322 tcg_temp_free_i32(t0);
323 }
324
/* Raise a program exception flagged as an invalid-instruction error. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
344
/* Convenience wrappers over GEN_OPCODE/GEN_OPCODE2 (defined below);
   the *_E variants take an extended type2, the *2 variants an explicit
   opcode-table name distinct from the handler name. */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

/* One row of the static opcode tables: the three opcode fields, pad
   bytes to 64-bit-align the embedded handler, and the handler itself. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
367
/*****************************************************************************/
/*** Instruction decoding                                                  ***/
/* Define an inline extracting an unsigned bitfield of nb bits starting
   at bit position shift. */
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

/* Same, but the field is sign-extended through an int16_t cast (only
   meaningful for 16-bit fields such as SIMM). */
#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

/* Extract a field split across two sub-fields: nb1 bits at shift1
   become the high part, nb2 bits at shift2 the low part (VSX forms). */
#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |             \
            ((opcode >> (shift2)) & ((1 << (nb2)) - 1));                      \
}
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
/* The SPR number is encoded with its two 5-bit halves swapped;
   un-swap them to get the architectural SPR number. */
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
/*** Get constants ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
456
457 /*** Jump target decoding ***/
458 /* Immediate address */
459 static inline target_ulong LI(uint32_t opcode)
460 {
461 return (opcode >> 0) & 0x03FFFFFC;
462 }
463
/* Extract the BD field of a B-form branch: a 16-bit displacement
   whose low two bits are always clear. */
static inline uint32_t BD(uint32_t opcode)
{
    return opcode & 0xFFFCu;
}
468
/* Branch condition operand / condition bit */
EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
481
/* Create a mask between <start> and <end> bits (PowerPC big-endian bit
   numbering: bit 0 is the most significant).  When start > end the
   mask wraps around, matching the rlwinm/rldic* mask semantics.
   Note the preprocessor splice: the #if block supplies the fast paths
   (start == 0 or end == last bit) and the else branch after #endif is
   shared by both target widths. */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        /* General case: XOR of two right-aligned all-ones runs leaves
           exactly the bits [start, end] set; invert for a wrapped mask. */
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
509
/* VSX register numbers: 6 bits split between a low extension bit and
   the regular 5-bit field. */
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
/*****************************************************************************/
/* PowerPC instructions table                                                */

/* Static initializers for opcode_t table rows.  GEN_OPCODE uses one
   invalid-bits mask, GEN_OPCODE_DUAL distinct masks for the Rc=0/Rc=1
   forms, and GEN_OPCODE2 an explicit table name.  The statistics
   build also records the name inside the handler for profiling. */
#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif
614
/* SPR load/store helpers */
/* Load SPR number reg from env into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value t into SPR number reg in env. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
625
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Default handler installed in every unpopulated opcode-table slot;
   all bits are "invalid" so any encoding is rejected. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
639
/*** Integer comparison                                                    ***/

/* Compare arg0 with arg1 (signed when s != 0) and build the 4-bit
   CR field crf: SO is copied in first, then LT/GT/EQ are computed
   with setcond and OR-ed into place one bit at a time. */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
667
668 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
669 {
670 TCGv t0 = tcg_const_tl(arg1);
671 gen_op_cmp(arg0, t0, s, crf);
672 tcg_temp_free(t0);
673 }
674
/* 32-bit compare: both operands are extended (sign- or zero- depending
   on s) before the full-width compare is emitted. */
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

/* 32-bit compare against an immediate. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
698
/* Record-form helper: set CR0 from a signed compare of reg with zero,
   using the 32-bit variant when running in narrow mode. */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
707
/* cmp: bit 21 of the opcode (the L field) selects a 64-bit compare,
   which is only honoured on 64-bit-capable CPUs (PPC_64B). */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi: signed compare against a 16-bit signed immediate. */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl: unsigned register-register compare. */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli: unsigned compare against a 16-bit unsigned immediate. */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
755
/* isel (PowerPC 2.03 specification) */
/* rD = (CR bit BC set) ? rA : rB, with rA == 0 meaning the constant 0.
   Implemented branch-free: the selected CR bit is isolated in t0 and
   a movcond picks between the two sources. */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}
774
/* cmpb: PowerPC 2.05 specification */
/* Byte-wise compare of rS and rB; implemented entirely in a helper. */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
781
/*** Integer arithmetic                                                    ***/

/* Set XER[OV] (and accumulate into XER[SO]) from the result arg0 of
   adding/subtracting arg1 and arg2.  Overflow occurred iff the result
   sign differs from both operand signs (subtraction flips the test),
   leaving the overflow condition in the top bit of cpu_ov. */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        /* In 32-bit mode the relevant sign bit is bit 31. */
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
803
/* Common add function */
/* Emit ret = arg1 + arg2 (+ CA when add_ca), optionally computing
   XER[CA] (compute_ca), XER[OV] (compute_ov) and CR0 (compute_rc0).
   A scratch temp replaces ret while flags are derived so ret may alias
   an input. */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            /* Full-width carry via double-word adds: fold CA in first
               (when requested), then add arg2, collecting the carry-out
               in cpu_ca each time. */
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
/* rA == 0 encodes the "li" form: load the immediate directly. */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic addic.*/
/* Add-immediate-carrying: always computes CA; compute_rc0 selects the
   record (".") form. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
925
926 /* addis */
927 static void gen_addis(DisasContext *ctx)
928 {
929 target_long simm = SIMM(ctx->opcode);
930
931 if (rA(ctx->opcode) == 0) {
932 /* lis case */
933 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
934 } else {
935 tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
936 cpu_gpr[rA(ctx->opcode)], simm << 16);
937 }
938 }
939
/* Emit a 32-bit divide ret = arg1 / arg2 (signed when sign != 0).
   Division by zero, and INT32_MIN / -1 in the signed case, branch to
   the architectural "undefined result" path: the result is the sign
   extension of the dividend (signed) or 0 (unsigned), and OV/SO are
   set when compute_ov is requested. */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    /* Local temps survive the brcond-generated basic-block boundaries. */
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Overflow/divide-by-zero path. */
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
995
/* div[wd]eu[o][.] */
/* Extended divides are implemented in helpers; compute_ov is passed
   as a runtime constant so one helper serves both the o and non-o
   forms. */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1013
#if defined(TARGET_PPC64)
/* 64-bit analogue of gen_op_arith_divw: division by zero and
   INT64_MIN / -1 (signed) take the "undefined result" path, yielding
   the sign extension of the dividend (signed) or 0 (unsigned) and
   setting OV/SO when requested. */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Overflow/divide-by-zero path. */
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
1068
1069 /* mulhw mulhw. */
1070 static void gen_mulhw(DisasContext *ctx)
1071 {
1072 TCGv_i32 t0 = tcg_temp_new_i32();
1073 TCGv_i32 t1 = tcg_temp_new_i32();
1074
1075 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1076 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1077 tcg_gen_muls2_i32(t0, t1, t0, t1);
1078 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1079 tcg_temp_free_i32(t0);
1080 tcg_temp_free_i32(t1);
1081 if (unlikely(Rc(ctx->opcode) != 0))
1082 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1083 }
1084
1085 /* mulhwu mulhwu. */
1086 static void gen_mulhwu(DisasContext *ctx)
1087 {
1088 TCGv_i32 t0 = tcg_temp_new_i32();
1089 TCGv_i32 t1 = tcg_temp_new_i32();
1090
1091 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1092 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1093 tcg_gen_mulu2_i32(t0, t1, t0, t1);
1094 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1095 tcg_temp_free_i32(t0);
1096 tcg_temp_free_i32(t1);
1097 if (unlikely(Rc(ctx->opcode) != 0))
1098 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1099 }
1100
1101 /* mullw mullw. */
1102 static void gen_mullw(DisasContext *ctx)
1103 {
1104 #if defined(TARGET_PPC64)
1105 TCGv_i64 t0, t1;
1106 t0 = tcg_temp_new_i64();
1107 t1 = tcg_temp_new_i64();
1108 tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
1109 tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
1110 tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
1111 tcg_temp_free(t0);
1112 tcg_temp_free(t1);
1113 #else
1114 tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1115 cpu_gpr[rB(ctx->opcode)]);
1116 #endif
1117 if (unlikely(Rc(ctx->opcode) != 0))
1118 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1119 }
1120
/* mullwo mullwo. - mullw that also updates XER[OV]/XER[SO] */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    /* t0 = low 32 bits of the signed product, t1 = high 32 bits */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV is set iff the high half differs from the sign-extension of
     * the low half, i.e. the result does not fit in 32 bits. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1146
/* mulli - rD = rA * sign-extended 16-bit immediate */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
1153
1154 #if defined(TARGET_PPC64)
1155 /* mulhd mulhd. */
1156 static void gen_mulhd(DisasContext *ctx)
1157 {
1158 TCGv lo = tcg_temp_new();
1159 tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
1160 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1161 tcg_temp_free(lo);
1162 if (unlikely(Rc(ctx->opcode) != 0)) {
1163 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1164 }
1165 }
1166
1167 /* mulhdu mulhdu. */
1168 static void gen_mulhdu(DisasContext *ctx)
1169 {
1170 TCGv lo = tcg_temp_new();
1171 tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
1172 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1173 tcg_temp_free(lo);
1174 if (unlikely(Rc(ctx->opcode) != 0)) {
1175 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1176 }
1177 }
1178
1179 /* mulld mulld. */
1180 static void gen_mulld(DisasContext *ctx)
1181 {
1182 tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1183 cpu_gpr[rB(ctx->opcode)]);
1184 if (unlikely(Rc(ctx->opcode) != 0))
1185 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1186 }
1187
/* mulldo mulldo. - mulld that also updates XER[OV]/XER[SO] */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* t0 = low 64 bits of the signed product, t1 = high 64 bits */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV is set iff the high half is not the sign-extension of the low
     * half, i.e. the signed result does not fit in 64 bits. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif
1210
/* Common subf function: ret = arg2 - arg1 (i.e. ~arg1 + arg2 + 1),
 * optionally consuming/producing XER[CA] (add_ca/compute_ca), computing
 * XER[OV] (compute_ov) and setting CR0 (compute_rc0).  All subf/subfc/
 * subfe/neg variants funnel through here. */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a scratch so ret may alias arg1/arg2 while CA/OV still need
     * the original operand values. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                /* subf without CA-in: the "+1" of two's complement */
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            /* sum XOR carry-less-sum leaves exactly the carry bits */
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            /* Full-width: two chained add2 ops thread the carry through */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            /* Plain subtract: CA = (arg2 >= arg1), i.e. no borrow */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    /* Copy the scratch back into the destination register if used */
    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with Two operands functions:
 * expands to gen_<name>(), subtracting rA from rB into rD via
 * gen_op_arith_subf with the given CA/OV behaviour. */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate:
 * same as above but the second operand is the constant const_val. */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
1311
1312 /* subfic */
1313 static void gen_subfic(DisasContext *ctx)
1314 {
1315 TCGv c = tcg_const_tl(SIMM(ctx->opcode));
1316 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1317 c, 0, 1, 0, 0);
1318 tcg_temp_free(c);
1319 }
1320
1321 /* neg neg. nego nego. */
1322 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
1323 {
1324 TCGv zero = tcg_const_tl(0);
1325 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1326 zero, 0, 0, compute_ov, Rc(ctx->opcode));
1327 tcg_temp_free(zero);
1328 }
1329
/* neg - negate without overflow detection */
static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

/* nego - negate, also updating XER[OV]/XER[SO] */
static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
1339
/*** Integer logical ***/
/* Two-source logical op: rA = rS <tcg_op> rB, optional CR0 update on Rc */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* One-source logical op: rA = <tcg_op>(rS), optional CR0 update on Rc */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
1362
1363 /* andi. */
1364 static void gen_andi_(DisasContext *ctx)
1365 {
1366 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
1367 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1368 }
1369
1370 /* andis. */
1371 static void gen_andis_(DisasContext *ctx)
1372 {
1373 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
1374 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1375 }
1376
1377 /* cntlzw */
1378 static void gen_cntlzw(DisasContext *ctx)
1379 {
1380 gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1381 if (unlikely(Rc(ctx->opcode) != 0))
1382 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1383 }
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. - sign-extend byte */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. - sign-extend halfword */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
1394
#if defined(TARGET_PPC64)
/* Pause this vCPU: store 0 into CPUState::halted (reached from the env
 * pointer by offset arithmetic: env sits inside PowerPCCPU, CPUState is
 * its base) and raise EXCP_HLT to leave the TB so other CPUs can run. */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_err(ctx, EXCP_HLT, 1);
}
#endif /* defined(TARGET_PPC64) */
1407
/* or & or. - also implements mr, and on ppc64 the "or rN,rN,rN"
 * thread-priority hint nops, which update the PPR SPR. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the mr / mr. case (rS == rB): plain move */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* or. rN,rN,rN: only CR0 is affected */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        /* or rN,rN,rN: priority hint encoded in the register number */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            /* Replace the 3-bit priority field (bits 50..52) in PPR */
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
            /* Pause us out of TCG otherwise spin loops with smt_low
             * eat too much CPU and the kernel hangs
             */
            gen_pause(ctx);
        }
#endif
    }
}
/* orc & orc. - rA = rS | ~rB */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
1490
1491 /* xor & xor. */
1492 static void gen_xor(DisasContext *ctx)
1493 {
1494 /* Optimisation for "set to zero" case */
1495 if (rS(ctx->opcode) != rB(ctx->opcode))
1496 tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1497 else
1498 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
1499 if (unlikely(Rc(ctx->opcode) != 0))
1500 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1501 }
1502
1503 /* ori */
1504 static void gen_ori(DisasContext *ctx)
1505 {
1506 target_ulong uimm = UIMM(ctx->opcode);
1507
1508 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1509 return;
1510 }
1511 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1512 }
1513
1514 /* oris */
1515 static void gen_oris(DisasContext *ctx)
1516 {
1517 target_ulong uimm = UIMM(ctx->opcode);
1518
1519 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1520 /* NOP */
1521 return;
1522 }
1523 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
1524 }
1525
1526 /* xori */
1527 static void gen_xori(DisasContext *ctx)
1528 {
1529 target_ulong uimm = UIMM(ctx->opcode);
1530
1531 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1532 /* NOP */
1533 return;
1534 }
1535 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1536 }
1537
1538 /* xoris */
1539 static void gen_xoris(DisasContext *ctx)
1540 {
1541 target_ulong uimm = UIMM(ctx->opcode);
1542
1543 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1544 /* NOP */
1545 return;
1546 }
1547 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
1548 }
1549
/* popcntb : PowerPC 2.03 specification - per-byte population count */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/* popcntw - per-word population count */
static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification - doubleword population count */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
1568
1569 /* prtyw: PowerPC 2.05 specification */
1570 static void gen_prtyw(DisasContext *ctx)
1571 {
1572 TCGv ra = cpu_gpr[rA(ctx->opcode)];
1573 TCGv rs = cpu_gpr[rS(ctx->opcode)];
1574 TCGv t0 = tcg_temp_new();
1575 tcg_gen_shri_tl(t0, rs, 16);
1576 tcg_gen_xor_tl(ra, rs, t0);
1577 tcg_gen_shri_tl(t0, ra, 8);
1578 tcg_gen_xor_tl(ra, ra, t0);
1579 tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
1580 tcg_temp_free(t0);
1581 }
1582
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification.
 * XOR-folds rS by 32, 16 and 8 so bit 0 becomes the XOR of bits
 * 0, 8, 16, ..., 56 (parity of the low bit of each byte), then masks
 * the result down to that single bit. */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif
1600
#if defined(TARGET_PPC64)
/* bpermd - bit permute doubleword, done entirely in the helper */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif
1609
#if defined(TARGET_PPC64)
/* extsw & extsw. - sign-extend word */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd - count leading zeros of the full 64-bit rS into rA */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
1622
1623 /*** Integer rotate ***/
1624
/* rlwimi & rlwimi. - rotate rS left by sh, then insert the bits selected
 * by the MASK(mb,me) into rA, leaving the other bits of rA untouched. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    /* Fast path: the rotate + contiguous mask is exactly a deposit */
    if (sh == (31-me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv_i32 t0;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* mb/me are given for the low word of the 64-bit register */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        /* Rotate in 32 bits, then merge under the mask */
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new();
        tcg_gen_trunc_tl_i32(t0, t_rs);
        tcg_gen_rotli_i32(t0, t0, sh);
        tcg_gen_extu_i32_tl(t1, t0);
        tcg_temp_free_i32(t0);

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1663
/* rlwinm & rlwinm. - rotate rS left by sh and AND with MASK(mb,me) */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (mb == 0 && me == (31 - sh)) {
        /* slwi: shift left and zero-extend */
        tcg_gen_shli_tl(t_ra, t_rs, sh);
        tcg_gen_ext32u_tl(t_ra, t_ra);
    } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
        /* srwi: zero-extend and shift right */
        tcg_gen_ext32u_tl(t_ra, t_rs);
        tcg_gen_shri_tl(t_ra, t_ra, mb);
    } else {
#if defined(TARGET_PPC64)
        /* mb/me refer to the low word of the 64-bit register */
        mb += 32;
        me += 32;
#endif
        if (sh == 0) {
            /* Pure mask, no rotation needed */
            tcg_gen_andi_tl(t_ra, t_rs, MASK(mb, me));
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_andi_i32(t0, t0, MASK(mb, me));
            tcg_gen_extu_i32_tl(t_ra, t0);
            tcg_temp_free_i32(t0);
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1700
/* rlwnm & rlwnm. - rotate rS left by rB[0:4] and AND with MASK(mb,me) */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    TCGv_i32 t0, t1;

#if defined(TARGET_PPC64)
    /* mb/me refer to the low word of the 64-bit register */
    mb += 32;
    me += 32;
#endif

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, t_rb);
    tcg_gen_trunc_tl_i32(t1, t_rs);
    /* Only the low 5 bits of rB select the rotate count */
    tcg_gen_andi_i32(t0, t0, 0x1f);
    tcg_gen_rotl_i32(t1, t1, t0);
    tcg_temp_free_i32(t0);

    tcg_gen_andi_i32(t1, t1, MASK(mb, me));
    tcg_gen_extu_i32_tl(t_ra, t1);
    tcg_temp_free_i32(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1732
#if defined(TARGET_PPC64)
/* Expand the 2 variants of an MD/MDS-form insn whose extra opcode bit
 * splits the mb (or me) field: gen_<name>0/1 call gen_<name>(ctx, n). */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/* Expand the 4 variants where both the mb/me and sh fields are split:
 * gen_<name>0..3 call gen_<name>(ctx, xn, shn). */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
1764
/* Common generator for rldicl/rldicr/rldic: rotate rS left by sh and
 * AND with MASK(mb,me), with shift fast paths for the sldi/srdi forms. */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];

    if (sh != 0 && mb == 0 && me == (63 - sh)) {
        /* sldi */
        tcg_gen_shli_tl(t_ra, t_rs, sh);
    } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
        /* srdi */
        tcg_gen_shri_tl(t_ra, t_rs, mb);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1782
/* rldicl - rldicl. - rotate left, clear left: mask runs from mb to 63.
 * The 6-bit sh/mb values are split across the insn; shn/mbn carry the
 * extra high bit. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);

/* rldicr - rldicr. - rotate left, clear right: mask runs from 0 to me.
 * NOTE(review): the me field of the MD-form occupies the mb bit
 * positions, hence MB() is used to extract it here. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);

/* rldic - rldic. - rotate left, clear: mask runs from mb to 63-sh */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
1815
1816 static void gen_rldnm(DisasContext *ctx, int mb, int me)
1817 {
1818 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
1819 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
1820 TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
1821 TCGv t0;
1822
1823 t0 = tcg_temp_new();
1824 tcg_gen_andi_tl(t0, t_rb, 0x3f);
1825 tcg_gen_rotl_tl(t_ra, t_rs, t0);
1826 tcg_temp_free(t0);
1827
1828 tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
1829 if (unlikely(Rc(ctx->opcode) != 0)) {
1830 gen_set_Rc0(ctx, t_ra);
1831 }
1832 }
1833
/* rldcl - rldcl. - rotate left by register, clear left (mask mb..63) */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);

/* rldcr - rldcr. - rotate left by register, clear right (mask 0..me).
 * NOTE(review): the MDS-form me field sits in the mb bit positions,
 * hence MB() is used to extract it here. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
1853
/* rldimi - rldimi. - rotate rS left by sh and insert the bits selected
 * by MASK(mb, 63-sh) into rA, leaving the remaining bits of rA intact. */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Contiguous mask: the rotate + insert is exactly a deposit */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrap-around mask: rotate then merge under the mask */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
1881
1882 /*** Integer shift ***/
1883
/* slw & slw. - shift left word: rA = (rS << rB[0:4]) & 0xffffffff,
 * yielding 0 when rB's shift amount is 32..63. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20: shifting rB's bit 5
     * into the sign position and arithmetic-shifting it back replicates
     * that bit across the whole register. */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    /* Keep only the low word of the result */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1908
1909 /* sraw & sraw. */
1910 static void gen_sraw(DisasContext *ctx)
1911 {
1912 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
1913 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1914 if (unlikely(Rc(ctx->opcode) != 0))
1915 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1916 }
1917
/* srawi & srawi. - shift right algebraic word immediate.
 * CA is set iff the (negative) result had any 1 bits shifted out. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* No shift: just sign-extend, no bits shifted out so CA = 0 */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* CA = (shifted-out bits != 0) AND (value is negative) */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
1942
/* srw & srw. - shift right word: rA = (rS & 0xffffffff) >> rB[0:4],
 * yielding 0 when rB's shift amount is 32..63. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20: replicate rB's
     * bit 5 across the register via shift-left/arithmetic-shift-right. */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical word shift: clear the high half before shifting */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1967
#if defined(TARGET_PPC64)
/* sld & sld. - shift left doubleword: rA = rS << rB[0:5], yielding 0
 * when rB's shift amount is 64..127. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40: replicate rB's
     * bit 6 across the register via shift-left/arithmetic-shift-right. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1987
1988 /* srad & srad. */
1989 static void gen_srad(DisasContext *ctx)
1990 {
1991 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
1992 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1993 if (unlikely(Rc(ctx->opcode) != 0))
1994 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1995 }
/* sradi & sradi. - shift right algebraic doubleword immediate; n
 * supplies the high bit of the split 6-bit shift count.
 * CA is set iff any 1 bits are shifted out of a negative value. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* No shift: plain copy, no bits shifted out so CA = 0 */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        /* CA = (shifted-out bits != 0) AND (value is negative) */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2019
/* sradi variant with sh high bit = 0 */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

/* sradi variant with sh high bit = 1 (shift counts 32..63) */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2029
/* srd & srd. - shift right doubleword: rA = rS >> rB[0:5], yielding 0
 * when rB's shift amount is 64..127. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40: replicate rB's
     * bit 6 across the register via shift-left/arithmetic-shift-right. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
2049
#if defined(TARGET_PPC64)
/* Copy FPSCR bits 0:3 (FX/FEX/VX/OX) into CR field 1.  On 64-bit
 * targets FPSCR must first be truncated to 32 bits before shifting. */
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
    tcg_temp_free_i32(tmp);
}
#else
/* 32-bit target: FPSCR already fits, shift directly into CR1 */
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif
2064
/*** Floating-Point arithmetic ***/
/* Three-operand FP op (A, C, B), e.g. fmadd-style: checks FPU enable,
 * calls helper f<op>, optionally rounds to single (isfloat), optionally
 * computes FPRF (set_fprf), and sets CR1 from FPSCR on Rc. */
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);     \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

/* Expand double (opc 0x3F) and single-precision (opc 0x3B) variants */
#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                              \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);                     \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);

/* Two-operand FP op on A and B (fadd/fsub/fdiv shape) */
#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rB(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);

/* Two-operand FP op on A and C (fmul shape) */
#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2150
/* One-operand FP op on B only (frsp/fctiw shape), no frsp rounding step */
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

/* One-operand FP op on B only (fre/frsqrte shape) */
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
2190
/*** Floating-Point arithmetic expansions ***/
/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2206
2207 /* frsqrtes */
2208 static void gen_frsqrtes(DisasContext *ctx)
2209 {
2210 if (unlikely(!ctx->fpu_enabled)) {
2211 gen_exception(ctx, POWERPC_EXCP_FPU);
2212 return;
2213 }
2214 /* NIP cannot be restored if the memory exception comes from an helper */
2215 gen_update_nip(ctx, ctx->nip - 4);
2216 gen_reset_fpstatus();
2217 gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
2218 cpu_fpr[rB(ctx->opcode)]);
2219 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2220 cpu_fpr[rD(ctx->opcode)]);
2221 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2222 if (unlikely(Rc(ctx->opcode) != 0)) {
2223 gen_set_cr1_from_fpscr(ctx);
2224 }
2225 }
2226
/* fsel */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional instructions: */
2232
2233 /* fsqrt */
2234 static void gen_fsqrt(DisasContext *ctx)
2235 {
2236 if (unlikely(!ctx->fpu_enabled)) {
2237 gen_exception(ctx, POWERPC_EXCP_FPU);
2238 return;
2239 }
2240 /* NIP cannot be restored if the memory exception comes from an helper */
2241 gen_update_nip(ctx, ctx->nip - 4);
2242 gen_reset_fpstatus();
2243 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2244 cpu_fpr[rB(ctx->opcode)]);
2245 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2246 if (unlikely(Rc(ctx->opcode) != 0)) {
2247 gen_set_cr1_from_fpscr(ctx);
2248 }
2249 }
2250
2251 static void gen_fsqrts(DisasContext *ctx)
2252 {
2253 if (unlikely(!ctx->fpu_enabled)) {
2254 gen_exception(ctx, POWERPC_EXCP_FPU);
2255 return;
2256 }
2257 /* NIP cannot be restored if the memory exception comes from an helper */
2258 gen_update_nip(ctx, ctx->nip - 4);
2259 gen_reset_fpstatus();
2260 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2261 cpu_fpr[rB(ctx->opcode)]);
2262 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2263 cpu_fpr[rD(ctx->opcode)]);
2264 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2265 if (unlikely(Rc(ctx->opcode) != 0)) {
2266 gen_set_cr1_from_fpscr(ctx);
2267 }
2268 }
2269
/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2316
2317 static void gen_ftdiv(DisasContext *ctx)
2318 {
2319 if (unlikely(!ctx->fpu_enabled)) {
2320 gen_exception(ctx, POWERPC_EXCP_FPU);
2321 return;
2322 }
2323 gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2324 cpu_fpr[rB(ctx->opcode)]);
2325 }
2326
2327 static void gen_ftsqrt(DisasContext *ctx)
2328 {
2329 if (unlikely(!ctx->fpu_enabled)) {
2330 gen_exception(ctx, POWERPC_EXCP_FPU);
2331 return;
2332 }
2333 gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2334 }
2335
2336
2337
2338 /*** Floating-Point compare ***/
2339
2340 /* fcmpo */
2341 static void gen_fcmpo(DisasContext *ctx)
2342 {
2343 TCGv_i32 crf;
2344 if (unlikely(!ctx->fpu_enabled)) {
2345 gen_exception(ctx, POWERPC_EXCP_FPU);
2346 return;
2347 }
2348 /* NIP cannot be restored if the memory exception comes from an helper */
2349 gen_update_nip(ctx, ctx->nip - 4);
2350 gen_reset_fpstatus();
2351 crf = tcg_const_i32(crfD(ctx->opcode));
2352 gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
2353 cpu_fpr[rB(ctx->opcode)], crf);
2354 tcg_temp_free_i32(crf);
2355 gen_helper_float_check_status(cpu_env);
2356 }
2357
2358 /* fcmpu */
2359 static void gen_fcmpu(DisasContext *ctx)
2360 {
2361 TCGv_i32 crf;
2362 if (unlikely(!ctx->fpu_enabled)) {
2363 gen_exception(ctx, POWERPC_EXCP_FPU);
2364 return;
2365 }
2366 /* NIP cannot be restored if the memory exception comes from an helper */
2367 gen_update_nip(ctx, ctx->nip - 4);
2368 gen_reset_fpstatus();
2369 crf = tcg_const_i32(crfD(ctx->opcode));
2370 gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
2371 cpu_fpr[rB(ctx->opcode)], crf);
2372 tcg_temp_free_i32(crf);
2373 gen_helper_float_check_status(cpu_env);
2374 }
2375
2376 /*** Floating-point move ***/
2377 /* fabs */
2378 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
2379 static void gen_fabs(DisasContext *ctx)
2380 {
2381 if (unlikely(!ctx->fpu_enabled)) {
2382 gen_exception(ctx, POWERPC_EXCP_FPU);
2383 return;
2384 }
2385 tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2386 ~(1ULL << 63));
2387 if (unlikely(Rc(ctx->opcode))) {
2388 gen_set_cr1_from_fpscr(ctx);
2389 }
2390 }
2391
2392 /* fmr - fmr. */
2393 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
2394 static void gen_fmr(DisasContext *ctx)
2395 {
2396 if (unlikely(!ctx->fpu_enabled)) {
2397 gen_exception(ctx, POWERPC_EXCP_FPU);
2398 return;
2399 }
2400 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2401 if (unlikely(Rc(ctx->opcode))) {
2402 gen_set_cr1_from_fpscr(ctx);
2403 }
2404 }
2405
2406 /* fnabs */
2407 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
2408 static void gen_fnabs(DisasContext *ctx)
2409 {
2410 if (unlikely(!ctx->fpu_enabled)) {
2411 gen_exception(ctx, POWERPC_EXCP_FPU);
2412 return;
2413 }
2414 tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2415 1ULL << 63);
2416 if (unlikely(Rc(ctx->opcode))) {
2417 gen_set_cr1_from_fpscr(ctx);
2418 }
2419 }
2420
2421 /* fneg */
2422 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
2423 static void gen_fneg(DisasContext *ctx)
2424 {
2425 if (unlikely(!ctx->fpu_enabled)) {
2426 gen_exception(ctx, POWERPC_EXCP_FPU);
2427 return;
2428 }
2429 tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2430 1ULL << 63);
2431 if (unlikely(Rc(ctx->opcode))) {
2432 gen_set_cr1_from_fpscr(ctx);
2433 }
2434 }
2435
2436 /* fcpsgn: PowerPC 2.05 specification */
2437 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
2438 static void gen_fcpsgn(DisasContext *ctx)
2439 {
2440 if (unlikely(!ctx->fpu_enabled)) {
2441 gen_exception(ctx, POWERPC_EXCP_FPU);
2442 return;
2443 }
2444 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2445 cpu_fpr[rB(ctx->opcode)], 0, 63);
2446 if (unlikely(Rc(ctx->opcode))) {
2447 gen_set_cr1_from_fpscr(ctx);
2448 }
2449 }
2450
2451 static void gen_fmrgew(DisasContext *ctx)
2452 {
2453 TCGv_i64 b0;
2454 if (unlikely(!ctx->fpu_enabled)) {
2455 gen_exception(ctx, POWERPC_EXCP_FPU);
2456 return;
2457 }
2458 b0 = tcg_temp_new_i64();
2459 tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
2460 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2461 b0, 0, 32);
2462 tcg_temp_free_i64(b0);
2463 }
2464
2465 static void gen_fmrgow(DisasContext *ctx)
2466 {
2467 if (unlikely(!ctx->fpu_enabled)) {
2468 gen_exception(ctx, POWERPC_EXCP_FPU);
2469 return;
2470 }
2471 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
2472 cpu_fpr[rB(ctx->opcode)],
2473 cpu_fpr[rA(ctx->opcode)],
2474 32, 32);
2475 }
2476
2477 /*** Floating-Point status & ctrl register ***/
2478
2479 /* mcrfs */
2480 static void gen_mcrfs(DisasContext *ctx)
2481 {
2482 TCGv tmp = tcg_temp_new();
2483 TCGv_i32 tmask;
2484 TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
2485 int bfa;
2486 int nibble;
2487 int shift;
2488
2489 if (unlikely(!ctx->fpu_enabled)) {
2490 gen_exception(ctx, POWERPC_EXCP_FPU);
2491 return;
2492 }
2493 bfa = crfS(ctx->opcode);
2494 nibble = 7 - bfa;
2495 shift = 4 * nibble;
2496 tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
2497 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
2498 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2499 tcg_temp_free(tmp);
2500 tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
2501 /* Only the exception bits (including FX) should be cleared if read */
2502 tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
2503 /* FEX and VX need to be updated, so don't set fpscr directly */
2504 tmask = tcg_const_i32(1 << nibble);
2505 gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
2506 tcg_temp_free_i32(tmask);
2507 tcg_temp_free_i64(tnew_fpscr);
2508 }
2509
2510 /* mffs */
2511 static void gen_mffs(DisasContext *ctx)
2512 {
2513 if (unlikely(!ctx->fpu_enabled)) {
2514 gen_exception(ctx, POWERPC_EXCP_FPU);
2515 return;
2516 }
2517 gen_reset_fpstatus();
2518 tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
2519 if (unlikely(Rc(ctx->opcode))) {
2520 gen_set_cr1_from_fpscr(ctx);
2521 }
2522 }
2523
2524 /* mtfsb0 */
2525 static void gen_mtfsb0(DisasContext *ctx)
2526 {
2527 uint8_t crb;
2528
2529 if (unlikely(!ctx->fpu_enabled)) {
2530 gen_exception(ctx, POWERPC_EXCP_FPU);
2531 return;
2532 }
2533 crb = 31 - crbD(ctx->opcode);
2534 gen_reset_fpstatus();
2535 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
2536 TCGv_i32 t0;
2537 /* NIP cannot be restored if the memory exception comes from an helper */
2538 gen_update_nip(ctx, ctx->nip - 4);
2539 t0 = tcg_const_i32(crb);
2540 gen_helper_fpscr_clrbit(cpu_env, t0);
2541 tcg_temp_free_i32(t0);
2542 }
2543 if (unlikely(Rc(ctx->opcode) != 0)) {
2544 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2545 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2546 }
2547 }
2548
2549 /* mtfsb1 */
2550 static void gen_mtfsb1(DisasContext *ctx)
2551 {
2552 uint8_t crb;
2553
2554 if (unlikely(!ctx->fpu_enabled)) {
2555 gen_exception(ctx, POWERPC_EXCP_FPU);
2556 return;
2557 }
2558 crb = 31 - crbD(ctx->opcode);
2559 gen_reset_fpstatus();
2560 /* XXX: we pretend we can only do IEEE floating-point computations */
2561 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
2562 TCGv_i32 t0;
2563 /* NIP cannot be restored if the memory exception comes from an helper */
2564 gen_update_nip(ctx, ctx->nip - 4);
2565 t0 = tcg_const_i32(crb);
2566 gen_helper_fpscr_setbit(cpu_env, t0);
2567 tcg_temp_free_i32(t0);
2568 }
2569 if (unlikely(Rc(ctx->opcode) != 0)) {
2570 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2571 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2572 }
2573 /* We can raise a differed exception */
2574 gen_helper_float_check_status(cpu_env);
2575 }
2576
2577 /* mtfsf */
2578 static void gen_mtfsf(DisasContext *ctx)
2579 {
2580 TCGv_i32 t0;
2581 int flm, l, w;
2582
2583 if (unlikely(!ctx->fpu_enabled)) {
2584 gen_exception(ctx, POWERPC_EXCP_FPU);
2585 return;
2586 }
2587 flm = FPFLM(ctx->opcode);
2588 l = FPL(ctx->opcode);
2589 w = FPW(ctx->opcode);
2590 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
2591 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2592 return;
2593 }
2594 /* NIP cannot be restored if the memory exception comes from an helper */
2595 gen_update_nip(ctx, ctx->nip - 4);
2596 gen_reset_fpstatus();
2597 if (l) {
2598 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
2599 } else {
2600 t0 = tcg_const_i32(flm << (w * 8));
2601 }
2602 gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
2603 tcg_temp_free_i32(t0);
2604 if (unlikely(Rc(ctx->opcode) != 0)) {
2605 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2606 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2607 }
2608 /* We can raise a differed exception */
2609 gen_helper_float_check_status(cpu_env);
2610 }
2611
2612 /* mtfsfi */
2613 static void gen_mtfsfi(DisasContext *ctx)
2614 {
2615 int bf, sh, w;
2616 TCGv_i64 t0;
2617 TCGv_i32 t1;
2618
2619 if (unlikely(!ctx->fpu_enabled)) {
2620 gen_exception(ctx, POWERPC_EXCP_FPU);
2621 return;
2622 }
2623 w = FPW(ctx->opcode);
2624 bf = FPBF(ctx->opcode);
2625 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
2626 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2627 return;
2628 }
2629 sh = (8 * w) + 7 - bf;
2630 /* NIP cannot be restored if the memory exception comes from an helper */
2631 gen_update_nip(ctx, ctx->nip - 4);
2632 gen_reset_fpstatus();
2633 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
2634 t1 = tcg_const_i32(1 << sh);
2635 gen_helper_store_fpscr(cpu_env, t0, t1);
2636 tcg_temp_free_i64(t0);
2637 tcg_temp_free_i32(t1);
2638 if (unlikely(Rc(ctx->opcode) != 0)) {
2639 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2640 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2641 }
2642 /* We can raise a differed exception */
2643 gen_helper_float_check_status(cpu_env);
2644 }
2645
2646 /*** Addressing modes ***/
2647 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2648 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2649 target_long maskl)
2650 {
2651 target_long simm = SIMM(ctx->opcode);
2652
2653 simm &= ~maskl;
2654 if (rA(ctx->opcode) == 0) {
2655 if (NARROW_MODE(ctx)) {
2656 simm = (uint32_t)simm;
2657 }
2658 tcg_gen_movi_tl(EA, simm);
2659 } else if (likely(simm != 0)) {
2660 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2661 if (NARROW_MODE(ctx)) {
2662 tcg_gen_ext32u_tl(EA, EA);
2663 }
2664 } else {
2665 if (NARROW_MODE(ctx)) {
2666 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2667 } else {
2668 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2669 }
2670 }
2671 }
2672
2673 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2674 {
2675 if (rA(ctx->opcode) == 0) {
2676 if (NARROW_MODE(ctx)) {
2677 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2678 } else {
2679 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2680 }
2681 } else {
2682 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2683 if (NARROW_MODE(ctx)) {
2684 tcg_gen_ext32u_tl(EA, EA);
2685 }
2686 }
2687 }
2688
2689 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2690 {
2691 if (rA(ctx->opcode) == 0) {
2692 tcg_gen_movi_tl(EA, 0);
2693 } else if (NARROW_MODE(ctx)) {
2694 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2695 } else {
2696 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2697 }
2698 }
2699
/* ret = arg1 + val, truncated to 32 bits in narrow (32-bit) mode.  */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
2708
/* Emit an alignment check: if (EA & mask) != 0, raise POWERPC_EXCP_ALIGN,
 * otherwise fall through to the code following the label.  */
static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
{
    TCGLabel *l1 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1, t2;
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    tcg_gen_andi_tl(t0, EA, mask);
    /* Aligned access: branch around the exception call.  */
    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
    t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
    t2 = tcg_const_i32(0);
    gen_helper_raise_exception_err(cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    gen_set_label(l1);
    tcg_temp_free(t0);
}
2726
2727 /*** Integer load ***/
static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Byte loads need no byte-order adjustment, so no memop mask.  */
    tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Zero-extending 16-bit load in the guest's default byte order.  */
    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Sign-extending 16-bit load.  */
    TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Zero-extending 32-bit load.  */
    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    /* 32-bit zero-extending load into a 64-bit destination.  */
    TCGv tmp = tcg_temp_new();
    gen_qemu_ld32u(ctx, tmp, addr);
    tcg_gen_extu_tl_i64(val, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Sign-extending 32-bit load.  */
    TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    /* 32-bit sign-extending load into a 64-bit destination.  */
    TCGv tmp = tcg_temp_new();
    gen_qemu_ld32s(ctx, tmp, addr);
    tcg_gen_ext_tl_i64(val, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    /* 64-bit load.  */
    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}
2778
static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Byte stores need no byte-order adjustment, so no memop mask.  */
    tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}

static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* 16-bit store in the guest's default byte order.  */
    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}

static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* 32-bit store.  */
    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}

static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
    /* Truncate a 64-bit value and store its low 32 bits.  */
    TCGv tmp = tcg_temp_new();
    tcg_gen_trunc_i64_tl(tmp, val);
    gen_qemu_st32(ctx, tmp, addr);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    /* 64-bit store.  */
    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
    tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
}
2809
/* D-form load: gen_<name> loads into rD from EA = (rA|0) + SIMM.  */
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

/* D-form load with update: rA is written back with the EA.  rA == 0 or
 * rA == rD is invalid.  PPC_64B (DS-form) masks the two low SIMM bits.  */
#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

/* X-form indexed load with update: EA = rA + rB, rA written back.  */
#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

/* X-form indexed load: EA = (rA|0) + rB.  */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)

/* Expand all four addressing variants (D, D-update, X-update, X).  */
#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2876
/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2894
2895 static void gen_ld(DisasContext *ctx)
2896 {
2897 TCGv EA;
2898 if (Rc(ctx->opcode)) {
2899 if (unlikely(rA(ctx->opcode) == 0 ||
2900 rA(ctx->opcode) == rD(ctx->opcode))) {
2901 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2902 return;
2903 }
2904 }
2905 gen_set_access_type(ctx, ACCESS_INT);
2906 EA = tcg_temp_new();
2907 gen_addr_imm_index(ctx, EA, 0x03);
2908 if (ctx->opcode & 0x02) {
2909 /* lwa (lwau is undefined) */
2910 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2911 } else {
2912 /* ld - ldu */
2913 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2914 }
2915 if (Rc(ctx->opcode))
2916 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2917 tcg_temp_free(EA);
2918 }
2919
2920 /* lq */
/* Quadword load into the even/odd register pair rd, rd+1.  */
static void gen_lq(DisasContext *ctx)
{
    int ra, rd;
    TCGv EA;

    /* lq is a legal user mode instruction starting in ISA 2.07 */
    bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
    bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

    if (!legal_in_user_mode && ctx->pr) {
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }

    /* Pre-2.07 implementations reject lq in little-endian mode.  */
    if (!le_is_supported && ctx->le_mode) {
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
        return;
    }

    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    /* rD must be even and must not overlap the base register.  */
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);

    /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
       64-bit byteswap already. */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
    } else {
        gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
    }
    tcg_temp_free(EA);
}
2964 #endif
2965
2966 /*** Integer store ***/
/* D-form store: gen_<name> stores rS at EA = (rA|0) + SIMM.  */
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

/* D-form store with update: rA is written back with the EA; rA == 0 is
 * invalid.  NOTE(review): the generated symbol is spelled gen_<stop>u
 * (e.g. gen_st8u), not gen_<name>u — presumably the opcode tables use
 * the same spelling; verify before renaming.  */
#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

/* X-form indexed store with update: EA = rA + rB, rA written back.  */
#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}

/* X-form indexed store: EA = (rA|0) + rB.  */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)

/* Expand all four addressing variants (D, D-update, X-update, X).  */
#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                        \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
3031
/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* stdux stdx (std/stdu are handled by gen_std below) */
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
3041
/* DS-form std/stdu, and stq when the two low opcode bits are 0b10.  */
static void gen_std(DisasContext *ctx)
{
    int rs;
    TCGv EA;

    rs = rS(ctx->opcode);
    if ((ctx->opcode & 0x3) == 0x2) { /* stq */

        /* stq is a legal user mode instruction starting in ISA 2.07 */
        bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
        bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

        if (!legal_in_user_mode && ctx->pr) {
            gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return;
        }

        /* Pre-2.07 implementations reject stq in little-endian mode.  */
        if (!le_is_supported && ctx->le_mode) {
            gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
            return;
        }

        /* rS must be an even register of the pair rs, rs+1.  */
        if (unlikely(rs & 1)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);

        /* We only need to swap high and low halves. gen_qemu_st64 does
           necessary 64-bit byteswap already. */
        if (unlikely(ctx->le_mode)) {
            gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
            gen_addr_add(ctx, EA, EA, 8);
            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        } else {
            gen_qemu_st64(ctx, cpu_gpr[rs], EA);
            gen_addr_add(ctx, EA, EA, 8);
            gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
        }
        tcg_temp_free(EA);
    } else {
        /* std / stdu*/
        if (Rc(ctx->opcode)) {
            /* Update form: rA must not be 0.  */
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        if (Rc(ctx->opcode))
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
        tcg_temp_free(EA);
    }
}
3100 #endif
3101 /*** Integer load and store with byte reverse ***/
3102
/* lhbrx */
static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* XORing MO_BSWAP accesses memory in the reverse of the default order. */
    TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Byte-reversed 32-bit load.  */
    TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Byte-reversed 64-bit load.  */
    TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
#endif  /* TARGET_PPC64 */

/* sthbrx */
static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    /* Byte-reversed 16-bit store.  */
    TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
3136
3137 /* stwbrx */
3138 static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)