/*
 *  PowerPC emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instruction classes and implementation flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers */

/* global register indexes */
static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
                          + 10*4 + 22*5 /* SPE GPRh */
                          + 10*4 + 22*5 /* FPR */
                          + 2*(10*6 + 22*7) /* AVRh, AVRl */
                          + 10*5 + 22*6 /* VSR */
                          + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_reserve;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "exec/gen-icount.h"

void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type), "access_type");

    done_init = 1;
}

/* internal defines */
struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    bool pr, hv;
    int mem_idx;
    int access_type;
    /* Translation flags */
    int le_mode;
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    int sf_mode;
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int vsx_enabled;
    int spe_enabled;
    int tm_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C) (!(C)->sf_mode)
#else
# define NARROW_MODE(C) 0
#endif

struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};

static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf(TCGv_i64 arg)
{
    gen_helper_compute_fprf(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}

static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif

#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;

/*****************************************************************************/
/*** Instruction decoding ***/
#define EXTRACT_HELPER(name, shift, nb) \
static inline uint32_t name(uint32_t opcode) \
{ \
    return (opcode >> (shift)) & ((1 << (nb)) - 1); \
}

#define EXTRACT_SHELPER(name, shift, nb) \
static inline int32_t name(uint32_t opcode) \
{ \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
}

#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
static inline uint32_t name(uint32_t opcode) \
{ \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
           ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
}
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
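/*
 * The 10-bit SPR field is encoded in the instruction with its two 5-bit
 * halves swapped (spr[5:9] || spr[0:4]), so swap them back here to get
 * the architected SPR number.  E.g. LR is SPR 8, but the instruction
 * field holds 0x100; SPR() turns 0x100 back into 8.
 */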
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
/*** Get constants ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);

/*** Jump target decoding ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)

/* Create a mask between <start> and <end> bits */
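/*
 * Bits are numbered IBM-style, with bit 0 the MSB, and the mask is
 * inclusive of both ends.  E.g. on a 32-bit target MASK(0, 15) is
 * 0xFFFF0000 and MASK(28, 31) is 0x0000000F; start > end yields the
 * complement, i.e. a wrap-around mask as used by the rotate-and-mask
 * instructions.
 */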
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
              (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}

EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
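/*
 * VSX register numbers are 6 bits wide; the extra high-order bit lives
 * in a separate opcode bit, so the split helpers above rebuild e.g.
 * xT as TX << 5 | T.
 */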
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
/*****************************************************************************/
/* PowerPC instructions table */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl1, \
        .inval2 = invl2, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = onam, \
    }, \
    .oname = onam, \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl1, \
        .inval2 = invl2, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = onam, \
}
#endif

/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1 = 0xFFFFFFFF,
    .inval2 = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};

/*** Integer comparison ***/

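/*
 * Build a CR field for a comparison: copy XER[SO] into the SO bit of
 * the field, then compute LT, GT and EQ with setcond and OR each one
 * in at its bit position (CRF_LT/CRF_GT/CRF_EQ).
 */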
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}

static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}

/* cmp */
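/* Bit 0x00200000 of the opcode is the L field: on 64-bit
   implementations it selects a full doubleword comparison instead of a
   32-bit one. */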
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}

/* isel (PowerPC 2.03 specification) */
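/* rD = CR[bi] ? rA : rB, where an rA field of 0 stands for the
   constant 0.  The branch below takes the rB path when the tested CR
   bit is clear. */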
static void gen_isel(DisasContext *ctx)
{
    TCGLabel *l1, *l2;
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask;
    TCGv_i32 t0;

    l1 = gen_new_label();
    l2 = gen_new_label();

    mask = 0x08 >> (bi & 0x03);
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
    if (rA(ctx->opcode) == 0)
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
    else
        tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    gen_set_label(l2);
    tcg_temp_free_i32(t0);
}

/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}

/*** Integer arithmetic ***/

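/*
 * Signed overflow detection via sign bits: an addition overflows iff
 * both operands have the same sign and the result's sign differs,
 * i.e. OV = (res ^ arg2) & ~(arg1 ^ arg2); for a subtraction
 * (res = arg2 - arg1) it is OV = (res ^ arg2) & (arg1 ^ arg2).  The
 * MSB of that expression becomes the OV bit.
 */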
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
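            /* (a + b) ^ a ^ b has a 1 at each bit position that received
               a carry-in, so bit 32 of it is exactly the carry out of the
               low 32-bit word. */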
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
                                add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_tl(const_val); \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], t0, \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
    tcg_temp_free(t0); \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}

static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov) \
static void gen_##name(DisasContext *ctx) \
{ \
    TCGv_i32 t0 = tcg_const_i32(compute_ov); \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
    } \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);

#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        TCGLabel *l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif

/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulhwu mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullw mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullwo mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}

#if defined(TARGET_PPC64)
/* mulhd mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulldo mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif

/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);        /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
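            /* ~arg1 + arg2 + 1 carries out exactly when arg2 >= arg1
               (unsigned), so CA can be computed directly with a setcond
               instead of a widening add. */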
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
                                 add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_tl(const_val); \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], t0, \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
    tcg_temp_free(t0); \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}

/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}

/*** Integer logical ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
           cpu_gpr[rB(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the mr (move register) case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        int prio = 0;

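        /* "or rx,rx,rx" encodings are architected no-ops that hint the
           thread priority; map the register number to a priority value
           and program it into the PPR priority field below. */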
        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        /* XXX: should handle special NOPs for POWER series */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}

/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

/* prtyw: PowerPC 2.05 specification */
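/* Fold each 32-bit word in half twice so that the low-order bit of
   every byte in the word is XOR-accumulated into the word's bit 0,
   then keep only bit 0 of each word (mask 0x100000001). */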
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif

/*** Integer rotate ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    sh = SH(ctx->opcode);
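    /* When the rotate amount positions the field exactly at the mask
       (sh == 31 - me and mb <= me), rlwimi degenerates into a plain
       bit-field deposit of width me - mb + 1. */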
    if (likely(sh == (31 - me) && mb <= me)) {
        tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                           cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv t1;
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotli_i64(t0, t0, sh);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        t1 = tcg_temp_new();
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    sh = SH(ctx->opcode);
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == (31 - sh))) {
        if (likely(sh == 0)) {
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)],
                              cpu_gpr[rS(ctx->opcode)]);
        } else {
            TCGv t0 = tcg_temp_new();
            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shli_tl(t0, t0, sh);
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
            tcg_temp_free(t0);
        }
    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_shri_tl(t0, t0, mb);
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free(t0);
    } else if (likely(mb == 0 && me == 31)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t0, t0, sh);
        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotli_i64(t0, t0, sh);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    uint32_t mb, me;
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == 31)) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_trunc_tl_i32(t1, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
        TCGv t0;
#if defined(TARGET_PPC64)
        TCGv t1;
#endif

        t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
        t1 = tcg_temp_new_i64();
        tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
                            cpu_gpr[rS(ctx->opcode)], 32, 32);
        tcg_gen_rotl_i64(t0, t1, t0);
        tcg_temp_free_i64(t1);
#else
        tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
        if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
            mb += 32;
            me += 32;
#endif
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        } else {
            tcg_gen_andi_tl(t0, t0, MASK(32, 63));
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2) \
static void glue(gen_, name##0)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0); \
} \
 \
static void glue(gen_, name##1)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1); \
}
#define GEN_PPC64_R4(name, opc1, opc2) \
static void glue(gen_, name##0)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0, 0); \
} \
 \
static void glue(gen_, name##1)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0, 1); \
} \
 \
static void glue(gen_, name##2)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1, 0); \
} \
 \
static void glue(gen_, name##3)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1, 1); \
}

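/* rldinm: rotate-left-doubleword immediate, then AND with MASK(mb, me).
   The first two branches recognize the encodings that are pure shifts
   (mb == 0 && me == 63 - sh is a left shift; me == 63 && sh == 64 - mb
   is a logical right shift). */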
static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
                              uint32_t sh)
{
    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        if (likely(mb == 0 && me == 63)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        } else {
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);

static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
{
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    if (unlikely(mb != 0 || me != 63)) {
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb, me;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    me = 63 - sh;
    if (unlikely(sh == 0 && mb == 0)) {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        TCGv t0, t1;
        target_ulong mask;

        t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        t1 = tcg_temp_new();
        mask = MASK(mb, me);
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif

/*** Integer shift ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
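    /* Shift rB's bit 5 (value 0x20) up into the sign bit, then
       arithmetic-shift it back across the whole word: the result is
       all-ones when the shift amount is >= 32, so the andc below forces
       the source operand to 0, matching the architected slw behaviour. */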
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* srawi & srawi. */
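/* CA must be set iff the source is negative and any 1 bits are shifted
   out (only then does the shift differ from dividing by 2^sh); the code
   below computes this as (src & ((1 << sh) - 1)) != 0 under the
   replicated sign bit. */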
1942 static void gen_srawi(DisasContext *ctx)
1943 {
1944 int sh = SH(ctx->opcode);
1945 TCGv dst = cpu_gpr[rA(ctx->opcode)];
1946 TCGv src = cpu_gpr[rS(ctx->opcode)];
1947 if (sh == 0) {
1948 tcg_gen_ext32s_tl(dst, src);
1949 tcg_gen_movi_tl(cpu_ca, 0);
1950 } else {
1951 TCGv t0;
1952 tcg_gen_ext32s_tl(dst, src);
1953 tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
1954 t0 = tcg_temp_new();
1955 tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
1956 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
1957 tcg_temp_free(t0);
1958 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
1959 tcg_gen_sari_tl(dst, dst, sh);
1960 }
1961 if (unlikely(Rc(ctx->opcode) != 0)) {
1962 gen_set_Rc0(ctx, dst);
1963 }
1964 }
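/*
 * Illustrative example: srawi r3,r4,4 with r4 = 0xFFFFFFF5 (-11).  The
 * four bits shifted out are 0b0101, the sign bit is set, so the
 * and/setcond sequence leaves cpu_ca = 1; the result is -11 >> 4 = -1.
 * CA is 1 only when the source is negative and a 1 bit was shifted out.
 */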
1965
1966 /* srw & srw. */
1967 static void gen_srw(DisasContext *ctx)
1968 {
1969 TCGv t0, t1;
1970
1971 t0 = tcg_temp_new();
1972 /* AND rS with a mask that is 0 when rB >= 0x20 */
1973 #if defined(TARGET_PPC64)
1974 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
1975 tcg_gen_sari_tl(t0, t0, 0x3f);
1976 #else
1977 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
1978 tcg_gen_sari_tl(t0, t0, 0x1f);
1979 #endif
1980 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1981 tcg_gen_ext32u_tl(t0, t0);
1982 t1 = tcg_temp_new();
1983 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
1984 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1985 tcg_temp_free(t1);
1986 tcg_temp_free(t0);
1987 if (unlikely(Rc(ctx->opcode) != 0))
1988 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1989 }
1990
1991 #if defined(TARGET_PPC64)
1992 /* sld & sld. */
1993 static void gen_sld(DisasContext *ctx)
1994 {
1995 TCGv t0, t1;
1996
1997 t0 = tcg_temp_new();
1998 /* AND rS with a mask that is 0 when rB >= 0x40 */
1999 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2000 tcg_gen_sari_tl(t0, t0, 0x3f);
2001 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2002 t1 = tcg_temp_new();
2003 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2004 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2005 tcg_temp_free(t1);
2006 tcg_temp_free(t0);
2007 if (unlikely(Rc(ctx->opcode) != 0))
2008 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2009 }
2010
2011 /* srad & srad. */
2012 static void gen_srad(DisasContext *ctx)
2013 {
2014 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2015 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2016 if (unlikely(Rc(ctx->opcode) != 0))
2017 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2018 }
2019 /* sradi & sradi. */
2020 static inline void gen_sradi(DisasContext *ctx, int n)
2021 {
2022 int sh = SH(ctx->opcode) + (n << 5);
2023 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2024 TCGv src = cpu_gpr[rS(ctx->opcode)];
2025 if (sh == 0) {
2026 tcg_gen_mov_tl(dst, src);
2027 tcg_gen_movi_tl(cpu_ca, 0);
2028 } else {
2029 TCGv t0;
2030 tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
2031 t0 = tcg_temp_new();
2032 tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
2033 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2034 tcg_temp_free(t0);
2035 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2036 tcg_gen_sari_tl(dst, src, sh);
2037 }
2038 if (unlikely(Rc(ctx->opcode) != 0)) {
2039 gen_set_Rc0(ctx, dst);
2040 }
2041 }
2042
2043 static void gen_sradi0(DisasContext *ctx)
2044 {
2045 gen_sradi(ctx, 0);
2046 }
2047
2048 static void gen_sradi1(DisasContext *ctx)
2049 {
2050 gen_sradi(ctx, 1);
2051 }
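/*
 * Note on the split shift count (illustrative): sradi encodes a 6-bit
 * shift whose top bit sits outside the SH field, hence the two wrappers
 * above; e.g. sradi r3,r4,40 decodes with n = 1 and SH = 8, so
 * sh = 8 + (1 << 5) = 40.
 */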
2052
2053 /* srd & srd. */
2054 static void gen_srd(DisasContext *ctx)
2055 {
2056 TCGv t0, t1;
2057
2058 t0 = tcg_temp_new();
2059 /* AND rS with a mask that is 0 when rB >= 0x40 */
2060 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2061 tcg_gen_sari_tl(t0, t0, 0x3f);
2062 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2063 t1 = tcg_temp_new();
2064 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2065 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2066 tcg_temp_free(t1);
2067 tcg_temp_free(t0);
2068 if (unlikely(Rc(ctx->opcode) != 0))
2069 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2070 }
2071 #endif
2072
2073 #if defined(TARGET_PPC64)
2074 static void gen_set_cr1_from_fpscr(DisasContext *ctx)
2075 {
2076 TCGv_i32 tmp = tcg_temp_new_i32();
2077 tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
2078 tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
2079 tcg_temp_free_i32(tmp);
2080 }
2081 #else
2082 static void gen_set_cr1_from_fpscr(DisasContext *ctx)
2083 {
2084 tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
2085 }
2086 #endif
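/*
 * In both variants above CR1 receives the top nibble of the 32-bit FPSCR
 * image, i.e. the FX, FEX, VX and OX bits; for example an FPSCR value of
 * 0xA2000000 sets cr1 to 0xA.  The Rc paths of the floating-point ops
 * below rely on this.
 */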
2087
2088 /*** Floating-Point arithmetic ***/
2089 #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
2090 static void gen_f##name(DisasContext *ctx) \
2091 { \
2092 if (unlikely(!ctx->fpu_enabled)) { \
2093 gen_exception(ctx, POWERPC_EXCP_FPU); \
2094 return; \
2095 } \
2096 /* NIP cannot be restored if the memory exception comes from a helper */ \
2097 gen_update_nip(ctx, ctx->nip - 4); \
2098 gen_reset_fpstatus(); \
2099 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2100 cpu_fpr[rA(ctx->opcode)], \
2101 cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2102 if (isfloat) { \
2103 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2104 cpu_fpr[rD(ctx->opcode)]); \
2105 } \
2106 if (set_fprf) { \
2107 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2108 } \
2109 if (unlikely(Rc(ctx->opcode) != 0)) { \
2110 gen_set_cr1_from_fpscr(ctx); \
2111 } \
2112 }
2113
2114 #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
2115 _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
2116 _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
2117
2118 #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2119 static void gen_f##name(DisasContext *ctx) \
2120 { \
2121 if (unlikely(!ctx->fpu_enabled)) { \
2122 gen_exception(ctx, POWERPC_EXCP_FPU); \
2123 return; \
2124 } \
2125 /* NIP cannot be restored if the memory exception comes from a helper */ \
2126 gen_update_nip(ctx, ctx->nip - 4); \
2127 gen_reset_fpstatus(); \
2128 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2129 cpu_fpr[rA(ctx->opcode)], \
2130 cpu_fpr[rB(ctx->opcode)]); \
2131 if (isfloat) { \
2132 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2133 cpu_fpr[rD(ctx->opcode)]); \
2134 } \
2135 if (set_fprf) { \
2136 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2137 } \
2138 if (unlikely(Rc(ctx->opcode) != 0)) { \
2139 gen_set_cr1_from_fpscr(ctx); \
2140 } \
2141 }
2142 #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2143 _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2144 _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
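/*
 * Hand expansion for reference (illustrative only): GEN_FLOAT_AB(add, ...)
 * produces a gen_fadd that calls gen_helper_fadd on fpr[rA] and fpr[rB],
 * plus a gen_fadds that does the same and then rounds the result to
 * single precision via gen_helper_frsp, per the isfloat parameter.
 */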
2145
2146 #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2147 static void gen_f##name(DisasContext *ctx) \
2148 { \
2149 if (unlikely(!ctx->fpu_enabled)) { \
2150 gen_exception(ctx, POWERPC_EXCP_FPU); \
2151 return; \
2152 } \
2153 /* NIP cannot be restored if the memory exception comes from a helper */ \
2154 gen_update_nip(ctx, ctx->nip - 4); \
2155 gen_reset_fpstatus(); \
2156 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2157 cpu_fpr[rA(ctx->opcode)], \
2158 cpu_fpr[rC(ctx->opcode)]); \
2159 if (isfloat) { \
2160 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2161 cpu_fpr[rD(ctx->opcode)]); \
2162 } \
2163 if (set_fprf) { \
2164 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2165 } \
2166 if (unlikely(Rc(ctx->opcode) != 0)) { \
2167 gen_set_cr1_from_fpscr(ctx); \
2168 } \
2169 }
2170 #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2171 _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2172 _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2173
2174 #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2175 static void gen_f##name(DisasContext *ctx) \
2176 { \
2177 if (unlikely(!ctx->fpu_enabled)) { \
2178 gen_exception(ctx, POWERPC_EXCP_FPU); \
2179 return; \
2180 } \
2181 /* NIP cannot be restored if the memory exception comes from a helper */ \
2182 gen_update_nip(ctx, ctx->nip - 4); \
2183 gen_reset_fpstatus(); \
2184 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2185 cpu_fpr[rB(ctx->opcode)]); \
2186 if (set_fprf) { \
2187 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2188 } \
2189 if (unlikely(Rc(ctx->opcode) != 0)) { \
2190 gen_set_cr1_from_fpscr(ctx); \
2191 } \
2192 }
2193
2194 #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2195 static void gen_f##name(DisasContext *ctx) \
2196 { \
2197 if (unlikely(!ctx->fpu_enabled)) { \
2198 gen_exception(ctx, POWERPC_EXCP_FPU); \
2199 return; \
2200 } \
2201 /* NIP cannot be restored if the memory exception comes from a helper */ \
2202 gen_update_nip(ctx, ctx->nip - 4); \
2203 gen_reset_fpstatus(); \
2204 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
2205 cpu_fpr[rB(ctx->opcode)]); \
2206 if (set_fprf) { \
2207 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
2208 } \
2209 if (unlikely(Rc(ctx->opcode) != 0)) { \
2210 gen_set_cr1_from_fpscr(ctx); \
2211 } \
2212 }
2213
2214 /* fadd - fadds */
2215 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
2216 /* fdiv - fdivs */
2217 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
2218 /* fmul - fmuls */
2219 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
2220
2221 /* fre */
2222 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
2223
2224 /* fres */
2225 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
2226
2227 /* frsqrte */
2228 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2229
2230 /* frsqrtes */
2231 static void gen_frsqrtes(DisasContext *ctx)
2232 {
2233 if (unlikely(!ctx->fpu_enabled)) {
2234 gen_exception(ctx, POWERPC_EXCP_FPU);
2235 return;
2236 }
2237 /* NIP cannot be restored if the memory exception comes from a helper */
2238 gen_update_nip(ctx, ctx->nip - 4);
2239 gen_reset_fpstatus();
2240 gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
2241 cpu_fpr[rB(ctx->opcode)]);
2242 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2243 cpu_fpr[rD(ctx->opcode)]);
2244 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2245 if (unlikely(Rc(ctx->opcode) != 0)) {
2246 gen_set_cr1_from_fpscr(ctx);
2247 }
2248 }
2249
2250 /* fsel */
2251 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
2252 /* fsub - fsubs */
2253 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
2254 /* Optional: */
2255
2256 /* fsqrt */
2257 static void gen_fsqrt(DisasContext *ctx)
2258 {
2259 if (unlikely(!ctx->fpu_enabled)) {
2260 gen_exception(ctx, POWERPC_EXCP_FPU);
2261 return;
2262 }
2263 /* NIP cannot be restored if the memory exception comes from a helper */
2264 gen_update_nip(ctx, ctx->nip - 4);
2265 gen_reset_fpstatus();
2266 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2267 cpu_fpr[rB(ctx->opcode)]);
2268 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2269 if (unlikely(Rc(ctx->opcode) != 0)) {
2270 gen_set_cr1_from_fpscr(ctx);
2271 }
2272 }
2273
2274 static void gen_fsqrts(DisasContext *ctx)
2275 {
2276 if (unlikely(!ctx->fpu_enabled)) {
2277 gen_exception(ctx, POWERPC_EXCP_FPU);
2278 return;
2279 }
2280 /* NIP cannot be restored if the memory exception comes from a helper */
2281 gen_update_nip(ctx, ctx->nip - 4);
2282 gen_reset_fpstatus();
2283 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2284 cpu_fpr[rB(ctx->opcode)]);
2285 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2286 cpu_fpr[rD(ctx->opcode)]);
2287 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2288 if (unlikely(Rc(ctx->opcode) != 0)) {
2289 gen_set_cr1_from_fpscr(ctx);
2290 }
2291 }
2292
2293 /*** Floating-Point multiply-and-add ***/
2294 /* fmadd - fmadds */
2295 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
2296 /* fmsub - fmsubs */
2297 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
2298 /* fnmadd - fnmadds */
2299 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
2300 /* fnmsub - fnmsubs */
2301 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
2302
2303 /*** Floating-Point round & convert ***/
2304 /* fctiw */
2305 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
2306 /* fctiwu */
2307 GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
2308 /* fctiwz */
2309 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
2310 /* fctiwuz */
2311 GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
2312 /* frsp */
2313 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
2314 /* fcfid */
2315 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
2316 /* fcfids */
2317 GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
2318 /* fcfidu */
2319 GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
2320 /* fcfidus */
2321 GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
2322 /* fctid */
2323 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
2324 /* fctidu */
2325 GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
2326 /* fctidz */
2327 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
2328 /* fctiduz */
2329 GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
2330
2331 /* frin */
2332 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
2333 /* friz */
2334 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
2335 /* frip */
2336 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
2337 /* frim */
2338 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2339
2340 static void gen_ftdiv(DisasContext *ctx)
2341 {
2342 if (unlikely(!ctx->fpu_enabled)) {
2343 gen_exception(ctx, POWERPC_EXCP_FPU);
2344 return;
2345 }
2346 gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2347 cpu_fpr[rB(ctx->opcode)]);
2348 }
2349
2350 static void gen_ftsqrt(DisasContext *ctx)
2351 {
2352 if (unlikely(!ctx->fpu_enabled)) {
2353 gen_exception(ctx, POWERPC_EXCP_FPU);
2354 return;
2355 }
2356 gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2357 }
2358
2359
2360
2361 /*** Floating-Point compare ***/
2362
2363 /* fcmpo */
2364 static void gen_fcmpo(DisasContext *ctx)
2365 {
2366 TCGv_i32 crf;
2367 if (unlikely(!ctx->fpu_enabled)) {
2368 gen_exception(ctx, POWERPC_EXCP_FPU);
2369 return;
2370 }
2371 /* NIP cannot be restored if the memory exception comes from a helper */
2372 gen_update_nip(ctx, ctx->nip - 4);
2373 gen_reset_fpstatus();
2374 crf = tcg_const_i32(crfD(ctx->opcode));
2375 gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
2376 cpu_fpr[rB(ctx->opcode)], crf);
2377 tcg_temp_free_i32(crf);
2378 gen_helper_float_check_status(cpu_env);
2379 }
2380
2381 /* fcmpu */
2382 static void gen_fcmpu(DisasContext *ctx)
2383 {
2384 TCGv_i32 crf;
2385 if (unlikely(!ctx->fpu_enabled)) {
2386 gen_exception(ctx, POWERPC_EXCP_FPU);
2387 return;
2388 }
2389 /* NIP cannot be restored if the memory exception comes from a helper */
2390 gen_update_nip(ctx, ctx->nip - 4);
2391 gen_reset_fpstatus();
2392 crf = tcg_const_i32(crfD(ctx->opcode));
2393 gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
2394 cpu_fpr[rB(ctx->opcode)], crf);
2395 tcg_temp_free_i32(crf);
2396 gen_helper_float_check_status(cpu_env);
2397 }
2398
2399 /*** Floating-point move ***/
2400 /* fabs */
2401 /* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
2402 static void gen_fabs(DisasContext *ctx)
2403 {
2404 if (unlikely(!ctx->fpu_enabled)) {
2405 gen_exception(ctx, POWERPC_EXCP_FPU);
2406 return;
2407 }
2408 tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2409 ~(1ULL << 63));
2410 if (unlikely(Rc(ctx->opcode))) {
2411 gen_set_cr1_from_fpscr(ctx);
2412 }
2413 }
2414
2415 /* fmr - fmr. */
2416 /* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
2417 static void gen_fmr(DisasContext *ctx)
2418 {
2419 if (unlikely(!ctx->fpu_enabled)) {
2420 gen_exception(ctx, POWERPC_EXCP_FPU);
2421 return;
2422 }
2423 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2424 if (unlikely(Rc(ctx->opcode))) {
2425 gen_set_cr1_from_fpscr(ctx);
2426 }
2427 }
2428
2429 /* fnabs */
2430 /* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
2431 static void gen_fnabs(DisasContext *ctx)
2432 {
2433 if (unlikely(!ctx->fpu_enabled)) {
2434 gen_exception(ctx, POWERPC_EXCP_FPU);
2435 return;
2436 }
2437 tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2438 1ULL << 63);
2439 if (unlikely(Rc(ctx->opcode))) {
2440 gen_set_cr1_from_fpscr(ctx);
2441 }
2442 }
2443
2444 /* fneg */
2445 /* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
2446 static void gen_fneg(DisasContext *ctx)
2447 {
2448 if (unlikely(!ctx->fpu_enabled)) {
2449 gen_exception(ctx, POWERPC_EXCP_FPU);
2450 return;
2451 }
2452 tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2453 1ULL << 63);
2454 if (unlikely(Rc(ctx->opcode))) {
2455 gen_set_cr1_from_fpscr(ctx);
2456 }
2457 }
2458
2459 /* fcpsgn: PowerPC 2.05 specification */
2460 /* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
2461 static void gen_fcpsgn(DisasContext *ctx)
2462 {
2463 if (unlikely(!ctx->fpu_enabled)) {
2464 gen_exception(ctx, POWERPC_EXCP_FPU);
2465 return;
2466 }
2467 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2468 cpu_fpr[rB(ctx->opcode)], 0, 63);
2469 if (unlikely(Rc(ctx->opcode))) {
2470 gen_set_cr1_from_fpscr(ctx);
2471 }
2472 }
2473
2474 static void gen_fmrgew(DisasContext *ctx)
2475 {
2476 TCGv_i64 b0;
2477 if (unlikely(!ctx->fpu_enabled)) {
2478 gen_exception(ctx, POWERPC_EXCP_FPU);
2479 return;
2480 }
2481 b0 = tcg_temp_new_i64();
2482 tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
2483 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2484 b0, 0, 32);
2485 tcg_temp_free_i64(b0);
2486 }
2487
2488 static void gen_fmrgow(DisasContext *ctx)
2489 {
2490 if (unlikely(!ctx->fpu_enabled)) {
2491 gen_exception(ctx, POWERPC_EXCP_FPU);
2492 return;
2493 }
2494 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
2495 cpu_fpr[rB(ctx->opcode)],
2496 cpu_fpr[rA(ctx->opcode)],
2497 32, 32);
2498 }
2499
2500 /*** Floating-Point status & ctrl register ***/
2501
2502 /* mcrfs */
2503 static void gen_mcrfs(DisasContext *ctx)
2504 {
2505 TCGv tmp = tcg_temp_new();
2506 TCGv_i32 tmask;
2507 TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
2508 int bfa;
2509 int nibble;
2510 int shift;
2511
2512 if (unlikely(!ctx->fpu_enabled)) {
2513 gen_exception(ctx, POWERPC_EXCP_FPU);
2514 return;
2515 }
2516 bfa = crfS(ctx->opcode);
2517 nibble = 7 - bfa;
2518 shift = 4 * nibble;
2519 tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
2520 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
2521 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2522 tcg_temp_free(tmp);
2523 tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
2524 /* Only the exception bits (including FX) are cleared when read */
2525 tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
2526 /* FEX and VX need to be updated, so don't set fpscr directly */
2527 tmask = tcg_const_i32(1 << nibble);
2528 gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
2529 tcg_temp_free_i32(tmask);
2530 tcg_temp_free_i64(tnew_fpscr);
2531 }
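/*
 * Worked example (illustrative): mcrfs with crfS = 3 gives nibble = 4 and
 * shift = 16, so (FPSCR >> 16) & 0xF lands in the destination CR field
 * and the mask passed to the helper, 1 << 4, restricts the clearing of
 * exception bits to that one nibble.
 */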
2532
2533 /* mffs */
2534 static void gen_mffs(DisasContext *ctx)
2535 {
2536 if (unlikely(!ctx->fpu_enabled)) {
2537 gen_exception(ctx, POWERPC_EXCP_FPU);
2538 return;
2539 }
2540 gen_reset_fpstatus();
2541 tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
2542 if (unlikely(Rc(ctx->opcode))) {
2543 gen_set_cr1_from_fpscr(ctx);
2544 }
2545 }
2546
2547 /* mtfsb0 */
2548 static void gen_mtfsb0(DisasContext *ctx)
2549 {
2550 uint8_t crb;
2551
2552 if (unlikely(!ctx->fpu_enabled)) {
2553 gen_exception(ctx, POWERPC_EXCP_FPU);
2554 return;
2555 }
2556 crb = 31 - crbD(ctx->opcode);
2557 gen_reset_fpstatus();
2558 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
2559 TCGv_i32 t0;
2560 /* NIP cannot be restored if the memory exception comes from a helper */
2561 gen_update_nip(ctx, ctx->nip - 4);
2562 t0 = tcg_const_i32(crb);
2563 gen_helper_fpscr_clrbit(cpu_env, t0);
2564 tcg_temp_free_i32(t0);
2565 }
2566 if (unlikely(Rc(ctx->opcode) != 0)) {
2567 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2568 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2569 }
2570 }
2571
2572 /* mtfsb1 */
2573 static void gen_mtfsb1(DisasContext *ctx)
2574 {
2575 uint8_t crb;
2576
2577 if (unlikely(!ctx->fpu_enabled)) {
2578 gen_exception(ctx, POWERPC_EXCP_FPU);
2579 return;
2580 }
2581 crb = 31 - crbD(ctx->opcode);
2582 gen_reset_fpstatus();
2583 /* XXX: we pretend we can only do IEEE floating-point computations */
2584 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
2585 TCGv_i32 t0;
2586 /* NIP cannot be restored if the memory exception comes from a helper */
2587 gen_update_nip(ctx, ctx->nip - 4);
2588 t0 = tcg_const_i32(crb);
2589 gen_helper_fpscr_setbit(cpu_env, t0);
2590 tcg_temp_free_i32(t0);
2591 }
2592 if (unlikely(Rc(ctx->opcode) != 0)) {
2593 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2594 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2595 }
2596 /* We may raise a deferred exception */
2597 gen_helper_float_check_status(cpu_env);
2598 }
2599
2600 /* mtfsf */
2601 static void gen_mtfsf(DisasContext *ctx)
2602 {
2603 TCGv_i32 t0;
2604 int flm, l, w;
2605
2606 if (unlikely(!ctx->fpu_enabled)) {
2607 gen_exception(ctx, POWERPC_EXCP_FPU);
2608 return;
2609 }
2610 flm = FPFLM(ctx->opcode);
2611 l = FPL(ctx->opcode);
2612 w = FPW(ctx->opcode);
2613 if (unlikely(w && !(ctx->insns_flags2 & PPC2_ISA205))) {
2614 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2615 return;
2616 }
2617 /* NIP cannot be restored if the memory exception comes from a helper */
2618 gen_update_nip(ctx, ctx->nip - 4);
2619 gen_reset_fpstatus();
2620 if (l) {
2621 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
2622 } else {
2623 t0 = tcg_const_i32(flm << (w * 8));
2624 }
2625 gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
2626 tcg_temp_free_i32(t0);
2627 if (unlikely(Rc(ctx->opcode) != 0)) {
2628 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2629 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2630 }
2631 /* We may raise a deferred exception */
2632 gen_helper_float_check_status(cpu_env);
2633 }
2634
2635 /* mtfsfi */
2636 static void gen_mtfsfi(DisasContext *ctx)
2637 {
2638 int bf, sh, w;
2639 TCGv_i64 t0;
2640 TCGv_i32 t1;
2641
2642 if (unlikely(!ctx->fpu_enabled)) {
2643 gen_exception(ctx, POWERPC_EXCP_FPU);
2644 return;
2645 }
2646 w = FPW(ctx->opcode);
2647 bf = FPBF(ctx->opcode);
2648 if (unlikely(w && !(ctx->insns_flags2 & PPC2_ISA205))) {
2649 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2650 return;
2651 }
2652 sh = (8 * w) + 7 - bf;
2653 /* NIP cannot be restored if the memory exception comes from a helper */
2654 gen_update_nip(ctx, ctx->nip - 4);
2655 gen_reset_fpstatus();
2656 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
2657 t1 = tcg_const_i32(1 << sh);
2658 gen_helper_store_fpscr(cpu_env, t0, t1);
2659 tcg_temp_free_i64(t0);
2660 tcg_temp_free_i32(t1);
2661 if (unlikely(Rc(ctx->opcode) != 0)) {
2662 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2663 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2664 }
2665 /* We may raise a deferred exception */
2666 gen_helper_float_check_status(cpu_env);
2667 }
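/*
 * Example of the sh computation above (illustrative): mtfsfi 7,IMM with
 * w = 0 yields sh = 0, placing the immediate in the lowest FPSCR nibble
 * with a helper mask of 1 << 0; with w = 1 and bf = 0, sh = 15 selects
 * the topmost nibble of the 64-bit image.
 */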
2668
2669 /*** Addressing modes ***/
2670 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2671 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2672 target_long maskl)
2673 {
2674 target_long simm = SIMM(ctx->opcode);
2675
2676 simm &= ~maskl;
2677 if (rA(ctx->opcode) == 0) {
2678 if (NARROW_MODE(ctx)) {
2679 simm = (uint32_t)simm;
2680 }
2681 tcg_gen_movi_tl(EA, simm);
2682 } else if (likely(simm != 0)) {
2683 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2684 if (NARROW_MODE(ctx)) {
2685 tcg_gen_ext32u_tl(EA, EA);
2686 }
2687 } else {
2688 if (NARROW_MODE(ctx)) {
2689 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2690 } else {
2691 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2692 }
2693 }
2694 }
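/*
 * Illustrative mapping to guest instructions: lwz r3,8(r4) takes the
 * addi path above and computes EA = r4 + 8, while lwz r3,8(0) hits the
 * rA == 0 special case and yields EA = 8; in narrow (32-bit) mode the
 * EA is additionally truncated to 32 bits.
 */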
2695
2696 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2697 {
2698 if (rA(ctx->opcode) == 0) {
2699 if (NARROW_MODE(ctx)) {
2700 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2701 } else {
2702 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2703 }
2704 } else {
2705 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2706 if (NARROW_MODE(ctx)) {
2707 tcg_gen_ext32u_tl(EA, EA);
2708 }
2709 }
2710 }
2711
2712 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2713 {
2714 if (rA(ctx->opcode) == 0) {
2715 tcg_gen_movi_tl(EA, 0);
2716 } else if (NARROW_MODE(ctx)) {
2717 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2718 } else {
2719 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2720 }
2721 }
2722
2723 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2724 target_long val)
2725 {
2726 tcg_gen_addi_tl(ret, arg1, val);
2727 if (NARROW_MODE(ctx)) {
2728 tcg_gen_ext32u_tl(ret, ret);
2729 }
2730 }
2731
2732 static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
2733 {
2734 TCGLabel *l1 = gen_new_label();
2735 TCGv t0 = tcg_temp_new();
2736 TCGv_i32 t1, t2;
2737 /* NIP cannot be restored if the memory exception comes from a helper */
2738 gen_update_nip(ctx, ctx->nip - 4);
2739 tcg_gen_andi_tl(t0, EA, mask);
2740 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
2741 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
2742 t2 = tcg_const_i32(0);
2743 gen_helper_raise_exception_err(cpu_env, t1, t2);
2744 tcg_temp_free_i32(t1);
2745 tcg_temp_free_i32(t2);
2746 gen_set_label(l1);
2747 tcg_temp_free(t0);
2748 }
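/*
 * Control-flow sketch (illustrative): with mask = 3 (word alignment),
 * EA & 3 == 0 branches straight to l1 and nothing else runs; any other
 * value falls through to raise POWERPC_EXCP_ALIGN via the helper, with
 * NIP already rewound to the faulting instruction.
 */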
2749
2750 /*** Integer load ***/
2751 static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2752 {
2753 tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
2754 }
2755
2756 static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2757 {
2758 TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
2759 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2760 }
2761
2762 static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2763 {
2764 TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
2765 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2766 }
2767
2768 static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2769 {
2770 TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
2771 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2772 }
2773
2774 static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2775 {
2776 TCGv tmp = tcg_temp_new();
2777 gen_qemu_ld32u(ctx, tmp, addr);
2778 tcg_gen_extu_tl_i64(val, tmp);
2779 tcg_temp_free(tmp);
2780 }
2781
2782 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2783 {
2784 TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
2785 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2786 }
2787
2788 static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2789 {
2790 TCGv tmp = tcg_temp_new();
2791 gen_qemu_ld32s(ctx, tmp, addr);
2792 tcg_gen_ext_tl_i64(val, tmp);
2793 tcg_temp_free(tmp);
2794 }
2795
2796 static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2797 {
2798 TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
2799 tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
2800 }
2801
2802 static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
2803 {
2804 tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
2805 }
2806
2807 static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
2808 {
2809 TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
2810 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
2811 }
2812
2813 static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
2814 {
2815 TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
2816 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
2817 }
2818
2819 static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2820 {
2821 TCGv tmp = tcg_temp_new();
2822 tcg_gen_trunc_i64_tl(tmp, val);
2823 gen_qemu_st32(ctx, tmp, addr);
2824 tcg_temp_free(tmp);
2825 }
2826
2827 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2828 {
2829 TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
2830 tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
2831 }
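/*
 * All of the loads and stores above fold in ctx->default_tcg_memop_mask,
 * which (assuming the usual DisasContext setup elsewhere in this file) is
 * MO_LE or MO_BE according to the guest's current endianness, so e.g.
 * MO_UW | MO_LE makes tcg_gen_qemu_ld_tl byteswap the halfword itself.
 */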
2832
2833 #define GEN_LD(name, ldop, opc, type) \
2834 static void glue(gen_, name)(DisasContext *ctx) \
2835 { \
2836 TCGv EA; \
2837 gen_set_access_type(ctx, ACCESS_INT); \
2838 EA = tcg_temp_new(); \
2839 gen_addr_imm_index(ctx, EA, 0); \
2840 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2841 tcg_temp_free(EA); \
2842 }
2843
2844 #define GEN_LDU(name, ldop, opc, type) \
2845 static void glue(gen_, name##u)(DisasContext *ctx) \
2846 { \
2847 TCGv EA; \
2848 if (unlikely(rA(ctx->opcode) == 0 || \
2849 rA(ctx->opcode) == rD(ctx->opcode))) { \
2850 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2851 return; \
2852 } \
2853 gen_set_access_type(ctx, ACCESS_INT); \
2854 EA = tcg_temp_new(); \
2855 if (type == PPC_64B) \
2856 gen_addr_imm_index(ctx, EA, 0x03); \
2857 else \
2858 gen_addr_imm_index(ctx, EA, 0); \
2859 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2860 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2861 tcg_temp_free(EA); \
2862 }
2863
2864 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2865 static void glue(gen_, name##ux)(DisasContext *ctx) \
2866 { \
2867 TCGv EA; \
2868 if (unlikely(rA(ctx->opcode) == 0 || \
2869 rA(ctx->opcode) == rD(ctx->opcode))) { \
2870 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2871 return; \
2872 } \
2873 gen_set_access_type(ctx, ACCESS_INT); \
2874 EA = tcg_temp_new(); \
2875 gen_addr_reg_index(ctx, EA); \
2876 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2877 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2878 tcg_temp_free(EA); \
2879 }
2880
2881 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2) \
2882 static void glue(gen_, name##x)(DisasContext *ctx) \
2883 { \
2884 TCGv EA; \
2885 gen_set_access_type(ctx, ACCESS_INT); \
2886 EA = tcg_temp_new(); \
2887 gen_addr_reg_index(ctx, EA); \
2888 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2889 tcg_temp_free(EA); \
2890 }
2891 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2892 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)
2893
2894 #define GEN_LDS(name, ldop, op, type) \
2895 GEN_LD(name, ldop, op | 0x20, type); \
2896 GEN_LDU(name, ldop, op | 0x21, type); \
2897 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2898 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
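/*
 * Hand expansion for reference (illustrative only): GEN_LDS(lbz, ld8u,
 * 0x02, PPC_INTEGER) below defines gen_lbz, gen_lbzu, gen_lbzux and
 * gen_lbzx, i.e. the D-form, update and indexed variants of the byte
 * load, all funnelled through gen_qemu_ld8u.
 */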
2899
2900 /* lbz lbzu lbzux lbzx */
2901 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2902 /* lha lhau lhaux lhax */
2903 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2904 /* lhz lhzu lhzux lhzx */
2905 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2906 /* lwz lwzu lwzux lwzx */
2907 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2908 #if defined(TARGET_PPC64)
2909 /* lwaux */
2910 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2911 /* lwax */
2912 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2913 /* ldux */
2914 GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
2915 /* ldx */
2916 GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2917
2918 static void gen_ld(DisasContext *ctx)
2919 {
2920 TCGv EA;
2921 if (Rc(ctx->opcode)) {
2922 if (unlikely(rA(ctx->opcode) == 0 ||
2923 rA(ctx->opcode) == rD(ctx->opcode))) {
2924 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2925 return;
2926 }
2927 }
2928 gen_set_access_type(ctx, ACCESS_INT);
2929 EA = tcg_temp_new();
2930 gen_addr_imm_index(ctx, EA, 0x03);
2931 if (ctx->opcode & 0x02) {
2932 /* lwa (lwau is undefined) */
2933 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2934 } else {
2935 /* ld - ldu */
2936 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2937 }
2938 if (Rc(ctx->opcode))
2939 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2940 tcg_temp_free(EA);
2941 }
2942
2943 /* lq */
2944 static void gen_lq(DisasContext *ctx)
2945 {
2946 int ra, rd;
2947 TCGv EA;
2948
2949 /* lq is a legal user mode instruction starting in ISA 2.07 */
2950 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2951 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2952
2953 if (!legal_in_user_mode && ctx->pr) {
2954 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2955 return;
2956 }
2957
2958 if (!le_is_supported && ctx->le_mode) {
2959 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2960 return;
2961 }
2962
2963 ra = rA(ctx->opcode);
2964 rd = rD(ctx->opcode);
2965 if (unlikely((rd & 1) || rd == ra)) {
2966 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2967 return;
2968 }
2969
2970 gen_set_access_type(ctx, ACCESS_INT);
2971 EA = tcg_temp_new();
2972 gen_addr_imm_index(ctx, EA, 0x0F);
2973
2974 /* We only need to swap the high and low halves; gen_qemu_ld64 already
2975 performs the necessary 64-bit byteswap. */
2976 if (unlikely(ctx->le_mode)) {
2977 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2978 gen_addr_add(ctx, EA, EA, 8);
2979 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2980 } else {
2981 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2982 gen_addr_add(ctx, EA, EA, 8);
2983 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2984 }
2985 tcg_temp_free(EA);
2986 }
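/*
 * Illustrative note: for lq r4,0(r7) in little-endian mode the sequence
 * above fills r5 from EA and r4 from EA + 8, reversing the register pair
 * so that the architecturally big-endian quadword ordering is preserved;
 * gen_qemu_ld64 has already byteswapped each half.
 */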
2987 #endif
2988
2989 /*** Integer store ***/
2990 #define GEN_ST(name, stop, opc, type) \
2991 static void glue(gen_, name)(DisasContext *ctx) \
2992 { \
2993 TCGv EA; \
2994 gen_set_access_type(ctx, ACCESS_INT); \
2995 EA = tcg_temp_new(); \
2996 gen_addr_imm_index(ctx, EA, 0); \
2997 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2998 tcg_temp_free(EA); \
2999 }
3000
3001 #define GEN_STU(name, stop, opc, type) \
3002 static void glue(gen_, stop##u)(DisasContext *ctx) \
3003 { \
3004 TCGv EA; \
3005 if (unlikely(rA(ctx->opcode) == 0)) { \
3006 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
3007 return; \
3008 } \
3009 gen_set_access_type(ctx, ACCESS_INT); \
3010 EA = tcg_temp_new(); \
3011 if (type == PPC_64B) \
3012 gen_addr_imm_index(ctx, EA, 0x03); \
3013 else \
3014 gen_addr_imm_index(ctx, EA, 0); \
3015 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
3016 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
3017 tcg_temp_free(EA); \
3018 }
3019
3020 #define GEN_STUX(name, stop, opc2, opc3, type) \
3021 static void glue(gen_, name##ux)(DisasContext *ctx) \
3022 { \
3023 TCGv EA; \
3024 if (unlikely(rA(ctx->opcode) == 0)) { \
3025 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
3026 return; \
3027 } \
3028 gen_set_access_type(ctx, ACCESS_INT); \
3029 EA = tcg_temp_new(); \
3030 gen_addr_reg_index(ctx, EA); \
3031 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
3032 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
3033 tcg_temp_free(EA); \
3034 }
3035
3036 #define GEN_STX_E(name, stop, opc2, opc3, type, type2) \
3037 static void glue(gen_, name##x)(DisasContext *ctx) \
3038 { \
3039 TCGv EA; \
3040 gen_set_access_type(ctx, ACCESS_INT); \
3041 EA = tcg_temp_new(); \
3042 gen_addr_reg_index(ctx, EA); \
3043 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
3044 tcg_temp_free(EA); \
3045 }
3046 #define GEN_STX(name, stop, opc2, opc3, type) \
3047 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)
3048
3049 #define GEN_STS(name, stop, op, type) \
3050 GEN_ST(name, stop, op | 0x20, type); \
3051 GEN_STU(name, stop, op | 0x21, type); \
3052 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
3053 GEN_STX(name, stop, 0x17, op | 0x00, type)
3054
3055 /* stb stbu stbux stbx */
3056 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
3057 /* sth sthu sthux sthx */
3058 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
3059 /* stw stwu stwux stwx */
3060 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
3061 #if defined(TARGET_PPC64)
3062 GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
3063 GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
3064
3065 static void gen_std(DisasContext *ctx)
3066 {
3067 int rs;
3068 TCGv EA;
3069
3070 rs = rS(ctx->opcode);
3071 if ((ctx->opcode & 0x3) == 0x2) { /* stq */
3072
3073 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
3074 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
3075
3076 if (!legal_in_user_mode && ctx->pr) {
3077 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
3078 return;
3079 }
3080
3081 if (!le_is_supported && ctx->le_mode) {
3082 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
3083 return;
3084 }
3085
3086 if (unlikely(rs & 1)) {
3087 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3088 return;
3089 }
3090 gen_set_access_type(ctx, ACCESS_INT);
3091 EA = tcg_temp_new();
3092 gen_addr_imm_index(ctx, EA, 0x03);
3093
3094 /* We only need to swap the high and low halves; gen_qemu_st64 already
3095 performs the necessary 64-bit byteswap. */
3096 if (unlikely(ctx->le_mode)) {
3097 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
3098 gen_addr_add(ctx, EA, EA, 8);
3099 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3100 } else {
3101 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3102 gen_addr_add(ctx, EA, EA, 8);
3103 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
3104 }
3105 tcg_temp_free(EA);
3106 } else {
3107 /* std / stdu */
3108 if (Rc(ctx->opcode)) {
3109 if (unlikely(rA(ctx->opcode) == 0)) {
3110 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3111 return;
3112 }
3113 }
3114 gen_set_access_type(ctx, ACCESS_INT);
3115 EA = tcg_temp_new();
3116 gen_addr_imm_index(ctx, EA, 0x03);