/* From qemu.git: target-ppc/translate.c */
1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/host-utils.h"
27 #include "exec/cpu_ldst.h"
28
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34
35
36 #define CPU_SINGLE_STEP 0x1
37 #define CPU_BRANCH_STEP 0x2
38 #define GDBSTUB_SINGLE_STEP 0x4
39
40 /* Include definitions for instructions classes and implementations flags */
41 //#define PPC_DEBUG_DISAS
42 //#define DO_PPC_STATISTICS
43
44 #ifdef PPC_DEBUG_DISAS
45 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
46 #else
47 # define LOG_DISAS(...) do { } while (0)
48 #endif
49 /*****************************************************************************/
50 /* Code translation helpers */
51
52 /* global register indexes */
53 static TCGv_env cpu_env;
54 static char cpu_reg_names[10*3 + 22*4 /* GPR */
55 + 10*4 + 22*5 /* SPE GPRh */
56 + 10*4 + 22*5 /* FPR */
57 + 2*(10*6 + 22*7) /* AVRh, AVRl */
58 + 10*5 + 22*6 /* VSR */
59 + 8*5 /* CRF */];
60 static TCGv cpu_gpr[32];
61 static TCGv cpu_gprh[32];
62 static TCGv_i64 cpu_fpr[32];
63 static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
64 static TCGv_i64 cpu_vsr[32];
65 static TCGv_i32 cpu_crf[8];
66 static TCGv cpu_nip;
67 static TCGv cpu_msr;
68 static TCGv cpu_ctr;
69 static TCGv cpu_lr;
70 #if defined(TARGET_PPC64)
71 static TCGv cpu_cfar;
72 #endif
73 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
74 static TCGv cpu_reserve;
75 static TCGv cpu_fpscr;
76 static TCGv_i32 cpu_access_type;
77
78 #include "exec/gen-icount.h"
79
/* Register all PPC TCG globals (GPRs, FPRs, AVR/VSR halves, CR fields and
 * special registers) against the CPU env.  Idempotent: guarded by done_init.
 *
 * Register names live in the static cpu_reg_names buffer; the pointer 'p'
 * walks it and the per-name advance (3/4, 4/5, ...) must match both the
 * snprintf'd name lengths (incl. NUL) and the buffer size expression at the
 * cpu_reg_names declaration. */
void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;   /* one-shot guard; not thread-safe */

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields crf0..crf7 ("crfN" is 5 bytes with NUL). */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        /* General purpose register rN. */
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        /* SPE high half rNH. */
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        /* Floating point register fpN. */
        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        /* Altivec halves: which u64 is "high" depends on host endianness. */
        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        /* VSX register vsrN. */
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    /* Special registers use fixed string-literal names. */
    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER and its broken-out SO/OV/CA bits. */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");

    /* lwarx/stwcx. reservation address. */
    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type), "access_type");

    done_init = 1;
}
187
/* internal defines */
/* Per-translation decoder state threaded through all gen_* routines. */
struct DisasContext {
    struct TranslationBlock *tb;    /* TB being translated */
    target_ulong nip;               /* next instruction pointer (see gen_update_nip) */
    uint32_t opcode;                /* current instruction word */
    uint32_t exception;             /* pending excp, POWERPC_EXCP_NONE if none */
    /* Routine used to access memory */
    bool pr, hv;                    /* NOTE(review): presumably MSR PR/HV state — confirm at init site */
    bool lazy_tlb_flush;
    int mem_idx;
    int access_type;                /* last value stored to cpu_access_type (cache) */
    /* Translation flags */
    int le_mode;                    /* guest currently little-endian (see need_byteswap) */
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    int sf_mode;                    /* 64-bit mode; clear => NARROW_MODE (32-bit wrap) */
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int vsx_enabled;
    int spe_enabled;
    int tm_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;           /* PPC_* instruction-class flags (e.g. PPC_64B) */
    uint64_t insns_flags2;
};
216
217 /* Return true iff byteswap is needed in a scalar memop */
218 static inline bool need_byteswap(const DisasContext *ctx)
219 {
220 #if defined(TARGET_WORDS_BIGENDIAN)
221 return ctx->le_mode;
222 #else
223 return !ctx->le_mode;
224 #endif
225 }
226
227 /* True when active word size < size of target_long. */
228 #ifdef TARGET_PPC64
229 # define NARROW_MODE(C) (!(C)->sf_mode)
230 #else
231 # define NARROW_MODE(C) 0
232 #endif
233
/* Decode/dispatch descriptor for one opcode.  NOTE(review): inval1/inval2
   look like masks of opcode bits that must be clear for a valid encoding —
   confirm against the decode loop (not visible in this chunk). */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;              /* opcode name, for dump/statistics */
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;                 /* translation-time hit counter */
#endif
};
252
/* Emit a call resetting env's float status before an FP operation. */
static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

/* Emit calls computing FPSCR's FPRF field from 'arg' and then checking
   accumulated float-exception status. */
static inline void gen_compute_fprf(TCGv_i64 arg)
{
    gen_helper_compute_fprf(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}
263
264 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
265 {
266 if (ctx->access_type != access_type) {
267 tcg_gen_movi_i32(cpu_access_type, access_type);
268 ctx->access_type = access_type;
269 }
270 }
271
272 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
273 {
274 if (NARROW_MODE(ctx)) {
275 nip = (uint32_t)nip;
276 }
277 tcg_gen_movi_tl(cpu_nip, nip);
278 }
279
/* Flush the translator's current nip into the cpu_nip global.  Takes an
   opaque pointer (actually a DisasContext *) so external code can call it. */
void gen_update_current_nip(void *opaque)
{
    DisasContext *ctx = opaque;

    tcg_gen_movi_tl(cpu_nip, ctx->nip);
}
286
287 static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
288 {
289 TCGv_i32 t0, t1;
290 if (ctx->exception == POWERPC_EXCP_NONE) {
291 gen_update_nip(ctx, ctx->nip);
292 }
293 t0 = tcg_const_i32(excp);
294 t1 = tcg_const_i32(error);
295 gen_helper_raise_exception_err(cpu_env, t0, t1);
296 tcg_temp_free_i32(t0);
297 tcg_temp_free_i32(t1);
298 ctx->exception = (excp);
299 }
300
301 static inline void gen_exception(DisasContext *ctx, uint32_t excp)
302 {
303 TCGv_i32 t0;
304 if (ctx->exception == POWERPC_EXCP_NONE) {
305 gen_update_nip(ctx, ctx->nip);
306 }
307 t0 = tcg_const_i32(excp);
308 gen_helper_raise_exception(cpu_env, t0);
309 tcg_temp_free_i32(t0);
310 ctx->exception = (excp);
311 }
312
313 static inline void gen_debug_exception(DisasContext *ctx)
314 {
315 TCGv_i32 t0;
316
317 if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
318 (ctx->exception != POWERPC_EXCP_SYNC)) {
319 gen_update_nip(ctx, ctx->nip);
320 }
321 t0 = tcg_const_i32(EXCP_DEBUG);
322 gen_helper_raise_exception(cpu_env, t0);
323 tcg_temp_free_i32(t0);
324 }
325
/* Raise a program exception marked invalid-instruction, OR-ing in the
   caller-supplied error bits. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
/* Sync nip and mark the context so the main loop ends this TB. */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
345
/* Convenience wrappers over GEN_OPCODE*: declare a table entry for handler
   gen_<name>.  The _E forms take an extended type flag; the 2 forms register
   under an explicit table name 'onam'. */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
357
/* One entry of the PowerPC opcode table: the three lookup opcode fields
   plus the handler descriptor and a printable name. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;          /* decode masks, flags and generator */
    const char *oname;              /* opcode name for logging */
} opcode_t;
368
369 /*****************************************************************************/
370 /*** Instruction decoding ***/
/* Define an inline helper extracting 'nb' bits of the opcode starting at
   LSB-based bit 'shift', zero-extended. */
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

/* Same, sign-extended.  The int16_t cast does the sign extension, so this
   is only correct for nb == 16 (its sole use here is SIMM). */
#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

/* Extract a field split in two: nb1 bits at shift1 form the high part,
   nb2 bits at shift2 the low part. */
#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2)                  \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |             \
            ((opcode >> (shift2)) & ((1 << (nb2)) - 1));                      \
}
/* Opcode and register-number fields; shift counts are LSB-based. */
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
EXTRACT_HELPER(Rc21, 10, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
415 /* SPR / TBL */
416 EXTRACT_HELPER(_SPR, 11, 10);
417 static inline uint32_t SPR(uint32_t opcode)
418 {
419 uint32_t sprn = _SPR(opcode);
420
421 return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
422 }
/*** Get constants ***/
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value (extracted raw, not sign-extended here) */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);

#ifndef CONFIG_USER_ONLY
EXTRACT_HELPER(SR, 16, 4);
#endif

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
456 EXTRACT_HELPER(FPW, 16, 1);
457
458 /*** Jump target decoding ***/
459 /* Immediate address */
460 static inline target_ulong LI(uint32_t opcode)
461 {
462 return (opcode >> 0) & 0x03FFFFFC;
463 }
464
/* 16-bit conditional-branch displacement, with the low AA/LK bits cleared. */
static inline uint32_t BD(uint32_t opcode)
{
    return opcode & 0xFFFC;
}
469
/* Branch condition operands */
EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* DFP Z22-form */
EXTRACT_HELPER(DCM, 10, 6)

/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
482
483 /* Create a mask between <start> and <end> bits */
484 static inline target_ulong MASK(uint32_t start, uint32_t end)
485 {
486 target_ulong ret;
487
488 #if defined(TARGET_PPC64)
489 if (likely(start == 0)) {
490 ret = UINT64_MAX << (63 - end);
491 } else if (likely(end == 63)) {
492 ret = UINT64_MAX >> start;
493 }
494 #else
495 if (likely(start == 0)) {
496 ret = UINT32_MAX << (31 - end);
497 } else if (likely(end == 31)) {
498 ret = UINT32_MAX >> start;
499 }
500 #endif
501 else {
502 ret = (((target_ulong)(-1ULL)) >> (start)) ^
503 (((target_ulong)(-1ULL) >> (end)) >> 1);
504 if (unlikely(start > end))
505 return ~ret;
506 }
507
508 return ret;
509 }
510
/* VSX registers are 6-bit numbers whose extra high bit is stored apart
   from the regular 5-bit field, hence the split extractors. */
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
520 /*****************************************************************************/
521 /* PowerPC instructions table */
522
/* Build an opcode_t initializer.  GEN_OPCODE uses one invalid-bits mask,
   GEN_OPCODE_DUAL has separate masks for the Rc=0/Rc=1 forms, and
   GEN_OPCODE2 registers under an explicit name 'onam'.  When statistics
   are enabled the inner handler also carries its name. */
#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
/* Without statistics the inner handler has no oname member to set. */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .pad  = { 0, },                                                           \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif
615
/* SPR load/store helpers */
/* Load SPR number 'reg' from env into TCG value 't'. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value 't' into SPR number 'reg' in env. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
626
/* Invalid instruction */
/* Handler for undecodable opcodes: raise a program check flagged as an
   invalid instruction. */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Catch-all table entry: every bit invalid, dispatching to gen_invalid. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
640
641 /*** Integer comparison ***/
642
643 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
644 {
645 TCGv t0 = tcg_temp_new();
646 TCGv_i32 t1 = tcg_temp_new_i32();
647
648 tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
649
650 tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1);
651 tcg_gen_trunc_tl_i32(t1, t0);
652 tcg_gen_shli_i32(t1, t1, CRF_LT);
653 tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
654
655 tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1);
656 tcg_gen_trunc_tl_i32(t1, t0);
657 tcg_gen_shli_i32(t1, t1, CRF_GT);
658 tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
659
660 tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
661 tcg_gen_trunc_tl_i32(t1, t0);
662 tcg_gen_shli_i32(t1, t1, CRF_EQ);
663 tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
664
665 tcg_temp_free(t0);
666 tcg_temp_free_i32(t1);
667 }
668
669 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
670 {
671 TCGv t0 = tcg_const_tl(arg1);
672 gen_op_cmp(arg0, t0, s, crf);
673 tcg_temp_free(t0);
674 }
675
676 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
677 {
678 TCGv t0, t1;
679 t0 = tcg_temp_new();
680 t1 = tcg_temp_new();
681 if (s) {
682 tcg_gen_ext32s_tl(t0, arg0);
683 tcg_gen_ext32s_tl(t1, arg1);
684 } else {
685 tcg_gen_ext32u_tl(t0, arg0);
686 tcg_gen_ext32u_tl(t1, arg1);
687 }
688 gen_op_cmp(t0, t1, s, crf);
689 tcg_temp_free(t1);
690 tcg_temp_free(t0);
691 }
692
693 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
694 {
695 TCGv t0 = tcg_const_tl(arg1);
696 gen_op_cmp32(arg0, t0, s, crf);
697 tcg_temp_free(t0);
698 }
699
700 static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
701 {
702 if (NARROW_MODE(ctx)) {
703 gen_op_cmpi32(reg, 0, 1, 0);
704 } else {
705 gen_op_cmpi(reg, 0, 1, 0);
706 }
707 }
708
709 /* cmp */
710 static void gen_cmp(DisasContext *ctx)
711 {
712 if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
713 gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
714 1, crfD(ctx->opcode));
715 } else {
716 gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
717 1, crfD(ctx->opcode));
718 }
719 }
720
721 /* cmpi */
722 static void gen_cmpi(DisasContext *ctx)
723 {
724 if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
725 gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
726 1, crfD(ctx->opcode));
727 } else {
728 gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
729 1, crfD(ctx->opcode));
730 }
731 }
732
733 /* cmpl */
734 static void gen_cmpl(DisasContext *ctx)
735 {
736 if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
737 gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
738 0, crfD(ctx->opcode));
739 } else {
740 gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
741 0, crfD(ctx->opcode));
742 }
743 }
744
745 /* cmpli */
746 static void gen_cmpli(DisasContext *ctx)
747 {
748 if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
749 gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
750 0, crfD(ctx->opcode));
751 } else {
752 gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
753 0, crfD(ctx->opcode));
754 }
755 }
756
757 /* isel (PowerPC 2.03 specification) */
758 static void gen_isel(DisasContext *ctx)
759 {
760 uint32_t bi = rC(ctx->opcode);
761 uint32_t mask = 0x08 >> (bi & 0x03);
762 TCGv t0 = tcg_temp_new();
763 TCGv zr;
764
765 tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
766 tcg_gen_andi_tl(t0, t0, mask);
767
768 zr = tcg_const_tl(0);
769 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
770 rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
771 cpu_gpr[rB(ctx->opcode)]);
772 tcg_temp_free(zr);
773 tcg_temp_free(t0);
774 }
775
/* cmpb: PowerPC 2.05 specification */
/* Delegated entirely to gen_helper_cmpb: rA = f(rS, rB). */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
782
/*** Integer arithmetic ***/

/* Compute XER[OV] (and accumulate into SO) for arg0 = arg1 +/- arg2.
 *
 * Sign-bit trick: (arg0 ^ arg2) has its top bit set when the result and
 * the second operand differ in sign; masking with (arg1 ^ arg2) (sub) or
 * its complement (add) keeps only the operand-sign combinations that can
 * actually overflow.  The final shift extracts that sign bit into OV. */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        /* In 32-bit mode the deciding sign bit is bit 31, not bit 63. */
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
804
/* Common add function */
/* Emit ret = arg1 + arg2 (+ CA when add_ca), optionally computing the
 * CA, OV/SO and CR0 side effects.  All add-family instructions and addic
 * funnel through here. */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a scratch when flags are wanted: ret may alias arg1/arg2, and
       the flag computations must still see the original operands. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            /* Full-width carry via double-word add2 against zero. */
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    /* Copy the scratch result back to the real destination, if any. */
    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
/* Instantiate gen_<name> as a thin rD = rA + rB wrapper over
   gen_op_arith_add with the given flag behavior. */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
/* Same, but the second operand is the compile-time constant const_val. */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}
879
/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. (add -1 plus carry) */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo. (add zero plus carry) */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
895 /* addi */
896 static void gen_addi(DisasContext *ctx)
897 {
898 target_long simm = SIMM(ctx->opcode);
899
900 if (rA(ctx->opcode) == 0) {
901 /* li case */
902 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
903 } else {
904 tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
905 cpu_gpr[rA(ctx->opcode)], simm);
906 }
907 }
908 /* addic addic.*/
909 static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
910 {
911 TCGv c = tcg_const_tl(SIMM(ctx->opcode));
912 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
913 c, 0, 1, 0, compute_rc0);
914 tcg_temp_free(c);
915 }
916
917 static void gen_addic(DisasContext *ctx)
918 {
919 gen_op_addic(ctx, 0);
920 }
921
922 static void gen_addic_(DisasContext *ctx)
923 {
924 gen_op_addic(ctx, 1);
925 }
926
927 /* addis */
928 static void gen_addis(DisasContext *ctx)
929 {
930 target_long simm = SIMM(ctx->opcode);
931
932 if (rA(ctx->opcode) == 0) {
933 /* lis case */
934 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
935 } else {
936 tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
937 cpu_gpr[rA(ctx->opcode)], simm << 16);
938 }
939 }
940
941 static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
942 TCGv arg2, int sign, int compute_ov)
943 {
944 TCGLabel *l1 = gen_new_label();
945 TCGLabel *l2 = gen_new_label();
946 TCGv_i32 t0 = tcg_temp_local_new_i32();
947 TCGv_i32 t1 = tcg_temp_local_new_i32();
948
949 tcg_gen_trunc_tl_i32(t0, arg1);
950 tcg_gen_trunc_tl_i32(t1, arg2);
951 tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
952 if (sign) {
953 TCGLabel *l3 = gen_new_label();
954 tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
955 tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
956 gen_set_label(l3);
957 tcg_gen_div_i32(t0, t0, t1);
958 } else {
959 tcg_gen_divu_i32(t0, t0, t1);
960 }
961 if (compute_ov) {
962 tcg_gen_movi_tl(cpu_ov, 0);
963 }
964 tcg_gen_br(l2);
965 gen_set_label(l1);
966 if (sign) {
967 tcg_gen_sari_i32(t0, t0, 31);
968 } else {
969 tcg_gen_movi_i32(t0, 0);
970 }
971 if (compute_ov) {
972 tcg_gen_movi_tl(cpu_ov, 1);
973 tcg_gen_movi_tl(cpu_so, 1);
974 }
975 gen_set_label(l2);
976 tcg_gen_extu_i32_tl(ret, t0);
977 tcg_temp_free_i32(t0);
978 tcg_temp_free_i32(t1);
979 if (unlikely(Rc(ctx->opcode) != 0))
980 gen_set_Rc0(ctx, ret);
981 }
/* Div functions */
/* Instantiate gen_<name> as rD = rA / rB via gen_op_arith_divw. */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
996
997 /* div[wd]eu[o][.] */
998 #define GEN_DIVE(name, hlpr, compute_ov) \
999 static void gen_##name(DisasContext *ctx) \
1000 { \
1001 TCGv_i32 t0 = tcg_const_i32(compute_ov); \
1002 gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
1003 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
1004 tcg_temp_free_i32(t0); \
1005 if (unlikely(Rc(ctx->opcode) != 0)) { \
1006 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
1007 } \
1008 }
1009
1010 GEN_DIVE(divweu, divweu, 0);
1011 GEN_DIVE(divweuo, divweu, 1);
1012 GEN_DIVE(divwe, divwe, 0);
1013 GEN_DIVE(divweo, divwe, 1);
1014
1015 #if defined(TARGET_PPC64)
1016 static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
1017 TCGv arg2, int sign, int compute_ov)
1018 {
1019 TCGLabel *l1 = gen_new_label();
1020 TCGLabel *l2 = gen_new_label();
1021
1022 tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
1023 if (sign) {
1024 TCGLabel *l3 = gen_new_label();
1025 tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
1026 tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
1027 gen_set_label(l3);
1028 tcg_gen_div_i64(ret, arg1, arg2);
1029 } else {
1030 tcg_gen_divu_i64(ret, arg1, arg2);
1031 }
1032 if (compute_ov) {
1033 tcg_gen_movi_tl(cpu_ov, 0);
1034 }
1035 tcg_gen_br(l2);
1036 gen_set_label(l1);
1037 if (sign) {
1038 tcg_gen_sari_i64(ret, arg1, 63);
1039 } else {
1040 tcg_gen_movi_i64(ret, 0);
1041 }
1042 if (compute_ov) {
1043 tcg_gen_movi_tl(cpu_ov, 1);
1044 tcg_gen_movi_tl(cpu_so, 1);
1045 }
1046 gen_set_label(l2);
1047 if (unlikely(Rc(ctx->opcode) != 0))
1048 gen_set_Rc0(ctx, ret);
1049 }
/* Instantiate gen_<name> as rD = rA / rB via gen_op_arith_divd. */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
1069
1070 /* mulhw mulhw. */
1071 static void gen_mulhw(DisasContext *ctx)
1072 {
1073 TCGv_i32 t0 = tcg_temp_new_i32();
1074 TCGv_i32 t1 = tcg_temp_new_i32();
1075
1076 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1077 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1078 tcg_gen_muls2_i32(t0, t1, t0, t1);
1079 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1080 tcg_temp_free_i32(t0);
1081 tcg_temp_free_i32(t1);
1082 if (unlikely(Rc(ctx->opcode) != 0))
1083 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1084 }
1085
1086 /* mulhwu mulhwu. */
1087 static void gen_mulhwu(DisasContext *ctx)
1088 {
1089 TCGv_i32 t0 = tcg_temp_new_i32();
1090 TCGv_i32 t1 = tcg_temp_new_i32();
1091
1092 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1093 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1094 tcg_gen_mulu2_i32(t0, t1, t0, t1);
1095 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1096 tcg_temp_free_i32(t0);
1097 tcg_temp_free_i32(t1);
1098 if (unlikely(Rc(ctx->opcode) != 0))
1099 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1100 }
1101
/* mullw mullw. */
/* mullw: low 32 bits of rA * rB.  On PPC64 the full 64-bit product of
 * the sign-extended low words is written to rD; on 32-bit targets a
 * plain 32-bit multiply suffices. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1121
/* mullwo mullwo. */
/* mullwo: 32x32 signed multiply with overflow detection.  OV is set when
 * the 64-bit product does not fit in 32 bits, i.e. when the high half is
 * not the sign-extension of the low half. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    /* t0 = low half, t1 = high half of the signed product */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    /* Keep the whole 64-bit product in rD (low = t0, high = t1). */
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV = (high half != sign bits of low half) */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1147
1148 /* mulli */
1149 static void gen_mulli(DisasContext *ctx)
1150 {
1151 tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1152 SIMM(ctx->opcode));
1153 }
1154
1155 #if defined(TARGET_PPC64)
/* mulhd mulhd. */
/* mulhd: rD = high 64 bits of the signed 128-bit product rA * rB. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();    /* low half of the product, discarded */
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1167
/* mulhdu mulhdu. */
/* mulhdu: rD = high 64 bits of the unsigned 128-bit product rA * rB. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();    /* low half of the product, discarded */
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1179
1180 /* mulld mulld. */
1181 static void gen_mulld(DisasContext *ctx)
1182 {
1183 tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1184 cpu_gpr[rB(ctx->opcode)]);
1185 if (unlikely(Rc(ctx->opcode) != 0))
1186 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1187 }
1188
/* mulldo mulldo. */
/* mulldo: rD = low 64 bits of rA * rB; OV set when the signed 128-bit
 * product does not fit in 64 bits (high half != sign of low half). */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* t0 = low half, t1 = high half of the signed 128-bit product */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV = (high half != sign-extension of low half) */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1210 #endif
1211
/* Common subf function */
/* Generate a subtract-from: ret = ~arg1 + arg2 [+ CA], i.e. arg2 - arg1
 * (with optional carry-in).  Flags:
 *   add_ca      - add the incoming XER[CA] instead of the implicit +1
 *   compute_ca  - compute the outgoing carry into XER[CA]
 *   compute_ov  - compute XER[OV] and accumulate XER[SO]
 *   compute_rc0 - update CR0 (Rc=1 forms)
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        /* Work in a temporary: ret may alias arg1/arg2, which are still
         * needed afterwards to compute CA/OV. */
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else if (add_ca) {
            /* Full-width carry: two wide adds, arg2 + CA then + ~arg1,
             * each producing a carry-out via add2. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            /* ~arg1 + arg2 + 1 == arg2 - arg1; carry out iff arg2 >= arg1 */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Subtract functions with two register operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
1312
/* subfic */
/* subfic: rD = SIMM - rA; computes CA but neither OV nor CR0. */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}
1321
/* neg neg. nego nego. */
/* Negate: rD = 0 - rA, implemented as subf from a zero constant;
 * compute_ov selects the XER[OV]-updating (nego) variant. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}
1330
/* neg neg.: negate without overflow tracking */
static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 0);
}

/* nego nego.: negate and update XER[OV]/XER[SO] */
static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
1340
/*** Integer logical ***/
/* Generator for two-source logical ops: rA = rS <op> rB, optional CR0. */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* Generator for one-source logical ops: rA = <op>(rS), optional CR0. */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
1363
/* andi. */
/* andi.: rA = rS & UIMM; CR0 is always recorded (Rc implicit). */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1370
/* andis. */
/* andis.: rA = rS & (UIMM << 16); CR0 is always recorded (Rc implicit). */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1377
/* cntlzw */
/* cntlzw: rA = count of leading zeros of the low 32 bits of rS. */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
1395
#if defined(TARGET_PPC64)
/* Store 0 to CPUState.halted and raise EXCP_HLT so translation stops;
 * used by the priority-nop handling in gen_or(). */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    /* cpu_env points at the env member inside PowerPCCPU; step back to
     * the containing object to reach the CPUState.halted field. */
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_err(ctx, EXCP_HLT, 1);
}
#endif /* defined(TARGET_PPC64) */
1408
/* or & or. */
/* or & or.: rA = rS | rB.  Also implements the "mr" move idiom, and on
 * PPC64 decodes "or rx,rx,rx" with Rc=0 as a thread-priority hint that
 * updates SPR_PPR and pauses the vCPU. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        int prio = 0;

        /* "or rx,rx,rx": the register number selects the priority. */
        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            /* Replace the PPR priority field (bits 52:50). */
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
            /* Pause us out of TCG otherwise spin loops with smt_low
             * eat too much CPU and the kernel hangs
             */
            gen_pause(ctx);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
1491
1492 /* xor & xor. */
1493 static void gen_xor(DisasContext *ctx)
1494 {
1495 /* Optimisation for "set to zero" case */
1496 if (rS(ctx->opcode) != rB(ctx->opcode))
1497 tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1498 else
1499 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
1500 if (unlikely(Rc(ctx->opcode) != 0))
1501 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1502 }
1503
1504 /* ori */
1505 static void gen_ori(DisasContext *ctx)
1506 {
1507 target_ulong uimm = UIMM(ctx->opcode);
1508
1509 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1510 return;
1511 }
1512 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1513 }
1514
/* oris */
/* oris: rA = rS | (UIMM << 16); the rS==rA, UIMM==0 form is a nop. */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}
1526
1527 /* xori */
1528 static void gen_xori(DisasContext *ctx)
1529 {
1530 target_ulong uimm = UIMM(ctx->opcode);
1531
1532 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1533 /* NOP */
1534 return;
1535 }
1536 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1537 }
1538
/* xoris */
/* xoris: rA = rS ^ (UIMM << 16); the rS==rA, UIMM==0 form is a nop. */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}
1550
/* popcntb : PowerPC 2.03 specification */
/* popcntb: population count of each byte of rS into rA (helper call). */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/* popcntw: population count per 32-bit word (see helper_popcntw). */
static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
/* popcntd: population count of the full doubleword. */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
1569
/* prtyw: PowerPC 2.05 specification */
/* prtyw: word parity - xor-fold bits 16 and 8 of each word, then keep
 * bit 0 of each 32-bit word (mask 0x1_00000001 covers both words on
 * PPC64; it truncates to 1 on 32-bit targets). */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}
1583
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
/* prtyd: doubleword parity - xor-fold bits 32, 16 and 8, keep bit 0. */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif
1601
1602 #if defined(TARGET_PPC64)
/* bpermd */
/* bpermd: bit permute doubleword, delegated to the helper. */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
1609 #endif
1610
1611 #if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
/* cntlzd: rA = count of leading zeros of the 64-bit rS. */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1622 #endif
1623
1624 /*** Integer rotate ***/
1625
/* rlwimi & rlwimi. */
/* Rotate Left Word Immediate then Mask Insert:
 * rA = (ROTL32(rS, SH) & MASK(MB, ME)) | (rA & ~MASK(MB, ME)) */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31-me) && mb <= me) {
        /* Contiguous field aligned with the rotation: single deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv_i32 t0;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* 32-bit mask positions live in the low word of the 64-bit reg. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new();
        tcg_gen_trunc_tl_i32(t0, t_rs);
        tcg_gen_rotli_i32(t0, t0, sh);
        tcg_gen_extu_i32_tl(t1, t0);
        tcg_temp_free_i32(t0);

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1664
/* rlwinm & rlwinm. */
/* Rotate Left Word Immediate then AND with Mask:
 * rA = ROTL32(rS, SH) & MASK(MB, ME), with shift fast paths. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (mb == 0 && me == (31 - sh)) {
        /* slwi: shift left, clear the bits above the low word */
        tcg_gen_shli_tl(t_ra, t_rs, sh);
        tcg_gen_ext32u_tl(t_ra, t_ra);
    } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
        /* srwi: logical shift right of the low word */
        tcg_gen_ext32u_tl(t_ra, t_rs);
        tcg_gen_shri_tl(t_ra, t_ra, mb);
    } else {
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        if (sh == 0) {
            tcg_gen_andi_tl(t_ra, t_rs, MASK(mb, me));
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_andi_i32(t0, t0, MASK(mb, me));
            tcg_gen_extu_i32_tl(t_ra, t0);
            tcg_temp_free_i32(t0);
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1701
/* rlwnm & rlwnm. */
/* Rotate Left Word then AND with Mask; rotate count comes from rB[0:4]. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    TCGv_i32 t0, t1;

#if defined(TARGET_PPC64)
    mb += 32;
    me += 32;
#endif

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, t_rb);
    tcg_gen_trunc_tl_i32(t1, t_rs);
    /* Only the low 5 bits of rB select the rotate count. */
    tcg_gen_andi_i32(t0, t0, 0x1f);
    tcg_gen_rotl_i32(t1, t1, t0);
    tcg_temp_free_i32(t0);

    tcg_gen_andi_i32(t1, t1, MASK(mb, me));
    tcg_gen_extu_i32_tl(t_ra, t1);
    tcg_temp_free_i32(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1733
1734 #if defined(TARGET_PPC64)
/* Expand the two split-field variants of a 64-bit rotate insn. */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/* Expand the four split-field variants of a 64-bit rotate insn. */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
1765
/* Common helper for rldicl/rldicr/rldic:
 * rA = ROTL64(rS, sh) & MASK(mb, me), with shift fast paths. */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];

    if (sh != 0 && mb == 0 && me == (63 - sh)) {
        /* sldi */
        tcg_gen_shli_tl(t_ra, t_rs, sh);
    } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
        /* srdi */
        tcg_gen_shri_tl(t_ra, t_rs, mb);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1783
/* rldicl - rldicl. */
/* rldicl: rotate left then clear left; sh/mb are 6-bit split fields
 * (low 5 bits in the instruction field, top bit passed via shn/mbn). */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
1794
/* rldicr - rldicr. */
/* rldicr: rotate left then clear right.  The 64-bit me field occupies
 * the same instruction bits as mb, so MB() extracts it here. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
1805
/* rldic - rldic. */
/* rldic: rotate left then clear; mask runs from mb to 63-sh. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
1816
/* Common helper for rldcl/rldcr: rotate left by rB[0:5], then mask. */
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    /* Only the low 6 bits of rB select the rotate count. */
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);
    tcg_temp_free(t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1834
/* rldcl - rldcl. */
/* rldcl: rotate left (count in rB) then clear left from mb. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
1844
/* rldcr - rldcr. */
/* rldcr: rotate left (count in rB) then clear right after me.  The me
 * field shares the mb bit positions, hence MB(). */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
1854
/* rldimi - rldimi. */
/* Rotate Left Doubleword Immediate then Mask Insert:
 * rA = (ROTL64(rS, sh) & MASK(mb, 63-sh)) | (rA & ~MASK(mb, 63-sh)) */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Contiguous field: single deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
1881 #endif
1882
1883 /*** Integer shift ***/
1884
/* slw & slw. */
/* slw: rA = (rS << rB[0:4]) & 0xffffffff; a shift count with bit 5 set
 * (32..63) yields 0, implemented by the sign-smear mask below. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move shift-count bit 5 into the sign position, then smear it. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1909
/* sraw & sraw. */
/* sraw: arithmetic shift right word; CA handling lives in the helper. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1918
/* srawi & srawi. */
/* srawi: rA = (int32)rS >> sh.  CA = 1 iff the source was negative and
 * any 1-bits were shifted out (the shift was inexact). */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: just sign-extend; CA is always clear. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* CA candidate: any shifted-out low bits set... */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and the sign-extended value negative. */
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
1943
/* srw & srw. */
/* srw: rA = (uint32)rS >> rB[0:4]; shift counts 32..63 give 0 via the
 * sign-smear mask trick (same as gen_slw). */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Zero-extend so no high bits shift down into the result. */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1968
1969 #if defined(TARGET_PPC64)
/* sld & sld. */
/* sld: rA = rS << rB[0:5]; counts 64..127 yield 0 (sign-smear mask). */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1988
/* srad & srad. */
/* srad: arithmetic shift right doubleword; CA handled by the helper. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sradi & sradi. */
/* sradi: rA = (int64)rS >> sh, sh being a 6-bit split field.  CA = 1 iff
 * the source was negative and any 1-bits were shifted out. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: plain move; CA is always clear. */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        /* CA candidate: any shifted-out low bits set... */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and the source negative. */
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2020
/* sradi variant with sh top bit clear */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

/* sradi variant with sh top bit set */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2030
/* srd & srd. */
/* srd: rA = (uint64)rS >> rB[0:5]; counts 64..127 give 0 (mask trick). */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
2049 #endif
2050
#if defined(TARGET_PPC64)
/* Copy the top nibble of FPSCR (exception summary bits) into CR1;
 * used by the Rc=1 forms of FP instructions. */
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
    tcg_temp_free_i32(tmp);
}
#else
/* 32-bit variant: fpscr already fits, no truncation needed. */
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif
2065
/*** Floating-Point arithmetic ***/
/* Generator for 3-source FP ops, rD = op(rA, rC, rB): checks FP
 * availability, calls the helper, optionally rounds to single precision
 * (isfloat) and computes FPRF (set_fprf); Rc=1 copies the FPSCR summary
 * nibble into CR1. */
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);     \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}

/* Expand double (opcode 0x3F) and single-rounded (0x3B) variants. */
#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                              \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);                     \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
2095
/* Generator for 2-source FP ops, rD = op(rA, rB) (fadd/fdiv/fsub...);
 * same availability/rounding/FPRF/CR1 handling as _GEN_FLOAT_ACB. */
#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rB(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
/* Expand double and single-rounded variants. */
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2123
/* Generator for 2-source FP ops taking rA and rC (fmul family);
 * same availability/rounding/FPRF/CR1 handling as _GEN_FLOAT_ACB. */
#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
/* Expand double and single-rounded variants. */
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2151
/* Generator for single-source FP ops, rD = op(rB); no re-rounding. */
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
2171
/* Same body as GEN_FLOAT_B; differs only in the opcode-table parameters
 * (op1/op2 instead of op2/op3) used by the registration macros. */
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from an helper */ \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    if (set_fprf) {                                                           \
        gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);                           \
    }                                                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_cr1_from_fpscr(ctx);                                          \
    }                                                                         \
}
2191
2192 /* fadd - fadds */
2193 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
2194 /* fdiv - fdivs */
2195 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
2196 /* fmul - fmuls */
2197 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
2198 
2199 /* fre */
2200 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
2201 
2202 /* fres */
2203 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
2204 
2205 /* frsqrte */
2206 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2207 
2208 /* frsqrtes */
/* Single-precision reciprocal square-root estimate: reuse the frsqrte
 * helper and round the double result to single with frsp. */
2209 static void gen_frsqrtes(DisasContext *ctx)
2210 {
2211 if (unlikely(!ctx->fpu_enabled)) {
2212 gen_exception(ctx, POWERPC_EXCP_FPU);
2213 return;
2214 }
2215 /* NIP cannot be restored if the memory exception comes from an helper */
2216 gen_update_nip(ctx, ctx->nip - 4);
2217 gen_reset_fpstatus();
2218 gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
2219 cpu_fpr[rB(ctx->opcode)]);
2220 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2221 cpu_fpr[rD(ctx->opcode)]);
2222 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2223 if (unlikely(Rc(ctx->opcode) != 0)) {
2224 gen_set_cr1_from_fpscr(ctx);
2225 }
2226 }
2227 
2228 /* fsel */
2229 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
2230 /* fsub - fsubs */
2231 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
2232 /* Optional: */
2233 
2234 /* fsqrt */
2234 /* fsqrt */
/* Double-precision square root: rD = sqrt(rB); updates FPRF, CR1 if Rc. */
2235 static void gen_fsqrt(DisasContext *ctx)
2236 {
2237 if (unlikely(!ctx->fpu_enabled)) {
2238 gen_exception(ctx, POWERPC_EXCP_FPU);
2239 return;
2240 }
2241 /* NIP cannot be restored if the memory exception comes from an helper */
2242 gen_update_nip(ctx, ctx->nip - 4);
2243 gen_reset_fpstatus();
2244 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2245 cpu_fpr[rB(ctx->opcode)]);
2246 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2247 if (unlikely(Rc(ctx->opcode) != 0)) {
2248 gen_set_cr1_from_fpscr(ctx);
2249 }
2250 }
2251 
/* fsqrts: single-precision square root — double sqrt then frsp rounding. */
2252 static void gen_fsqrts(DisasContext *ctx)
2253 {
2254 if (unlikely(!ctx->fpu_enabled)) {
2255 gen_exception(ctx, POWERPC_EXCP_FPU);
2256 return;
2257 }
2258 /* NIP cannot be restored if the memory exception comes from an helper */
2259 gen_update_nip(ctx, ctx->nip - 4);
2260 gen_reset_fpstatus();
2261 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
2262 cpu_fpr[rB(ctx->opcode)]);
2263 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
2264 cpu_fpr[rD(ctx->opcode)]);
2265 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
2266 if (unlikely(Rc(ctx->opcode) != 0)) {
2267 gen_set_cr1_from_fpscr(ctx);
2268 }
2269 }
2270
2271 /*** Floating-Point multiply-and-add ***/
2272 /* fmadd - fmadds */
2273 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
2274 /* fmsub - fmsubs */
2275 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
2276 /* fnmadd - fnmadds */
2277 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
2278 /* fnmsub - fnmsubs */
2279 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
2280 
2281 /*** Floating-Point round & convert ***/
2282 /* fctiw */
2283 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
2284 /* fctiwu */
2285 GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
2286 /* fctiwz */
2287 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
2288 /* fctiwuz */
2289 GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
2290 /* frsp */
2291 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
2292 /* fcfid */
2293 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
2294 /* fcfids */
2295 GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
2296 /* fcfidu */
2297 GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
2298 /* fcfidus */
2299 GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
2300 /* fctid */
2301 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
2302 /* fctidu */
2303 GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
2304 /* fctidz */
2305 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
2306 /* fctiduz */
2307 GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
2308 
2309 /* frin */
2310 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
2311 /* friz */
2312 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
2313 /* frip */
2314 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
2315 /* frim */
2316 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2317
/* ftdiv: test-for-software-divide — helper writes the result CR field
 * directly; no FPSCR side effects, so no NIP update needed. */
2318 static void gen_ftdiv(DisasContext *ctx)
2319 {
2320 if (unlikely(!ctx->fpu_enabled)) {
2321 gen_exception(ctx, POWERPC_EXCP_FPU);
2322 return;
2323 }
2324 gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2325 cpu_fpr[rB(ctx->opcode)]);
2326 }
2327 
/* ftsqrt: test-for-software-square-root into the target CR field. */
2328 static void gen_ftsqrt(DisasContext *ctx)
2329 {
2330 if (unlikely(!ctx->fpu_enabled)) {
2331 gen_exception(ctx, POWERPC_EXCP_FPU);
2332 return;
2333 }
2334 gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2335 }
2336
2337
2338
2339 /*** Floating-Point compare ***/
2340
2341 /* fcmpo */
/* Ordered FP compare: helper sets CR[crfD] and may raise VXSNAN/VXVC. */
2342 static void gen_fcmpo(DisasContext *ctx)
2343 {
2344 TCGv_i32 crf;
2345 if (unlikely(!ctx->fpu_enabled)) {
2346 gen_exception(ctx, POWERPC_EXCP_FPU);
2347 return;
2348 }
2349 /* NIP cannot be restored if the memory exception comes from an helper */
2350 gen_update_nip(ctx, ctx->nip - 4);
2351 gen_reset_fpstatus();
2352 crf = tcg_const_i32(crfD(ctx->opcode));
2353 gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
2354 cpu_fpr[rB(ctx->opcode)], crf);
2355 tcg_temp_free_i32(crf);
2356 gen_helper_float_check_status(cpu_env);
2357 }
2358 
2359 /* fcmpu */
/* Unordered FP compare: same sequence as fcmpo with the fcmpu helper. */
2360 static void gen_fcmpu(DisasContext *ctx)
2361 {
2362 TCGv_i32 crf;
2363 if (unlikely(!ctx->fpu_enabled)) {
2364 gen_exception(ctx, POWERPC_EXCP_FPU);
2365 return;
2366 }
2367 /* NIP cannot be restored if the memory exception comes from an helper */
2368 gen_update_nip(ctx, ctx->nip - 4);
2369 gen_reset_fpstatus();
2370 crf = tcg_const_i32(crfD(ctx->opcode));
2371 gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
2372 cpu_fpr[rB(ctx->opcode)], crf);
2373 tcg_temp_free_i32(crf);
2374 gen_helper_float_check_status(cpu_env);
2375 }
2376
2377 /*** Floating-point move ***/
2378 /* fabs */
2379 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
/* fabs: clear the IEEE sign bit — rD = rB & ~(1<<63). */
2380 static void gen_fabs(DisasContext *ctx)
2381 {
2382 if (unlikely(!ctx->fpu_enabled)) {
2383 gen_exception(ctx, POWERPC_EXCP_FPU);
2384 return;
2385 }
2386 tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2387 ~(1ULL << 63));
2388 if (unlikely(Rc(ctx->opcode))) {
2389 gen_set_cr1_from_fpscr(ctx);
2390 }
2391 }
2392 
2393 /* fmr - fmr. */
2394 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
/* Plain 64-bit register copy rB -> rD. */
2395 static void gen_fmr(DisasContext *ctx)
2396 {
2397 if (unlikely(!ctx->fpu_enabled)) {
2398 gen_exception(ctx, POWERPC_EXCP_FPU);
2399 return;
2400 }
2401 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2402 if (unlikely(Rc(ctx->opcode))) {
2403 gen_set_cr1_from_fpscr(ctx);
2404 }
2405 }
2406 
2407 /* fnabs */
2408 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
/* fnabs: force the sign bit on — rD = rB | (1<<63). */
2409 static void gen_fnabs(DisasContext *ctx)
2410 {
2411 if (unlikely(!ctx->fpu_enabled)) {
2412 gen_exception(ctx, POWERPC_EXCP_FPU);
2413 return;
2414 }
2415 tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2416 1ULL << 63);
2417 if (unlikely(Rc(ctx->opcode))) {
2418 gen_set_cr1_from_fpscr(ctx);
2419 }
2420 }
2421 
2422 /* fneg */
2423 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
/* fneg: flip the sign bit — rD = rB ^ (1<<63). */
2424 static void gen_fneg(DisasContext *ctx)
2425 {
2426 if (unlikely(!ctx->fpu_enabled)) {
2427 gen_exception(ctx, POWERPC_EXCP_FPU);
2428 return;
2429 }
2430 tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2431 1ULL << 63);
2432 if (unlikely(Rc(ctx->opcode))) {
2433 gen_set_cr1_from_fpscr(ctx);
2434 }
2435 }
2436 
2437 /* fcpsgn: PowerPC 2.05 specification */
2438 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
/* fcpsgn: sign from rA, magnitude from rB — deposit rB's low 63 bits
 * into rA, leaving rA's bit 63 (sign) intact. */
2439 static void gen_fcpsgn(DisasContext *ctx)
2440 {
2441 if (unlikely(!ctx->fpu_enabled)) {
2442 gen_exception(ctx, POWERPC_EXCP_FPU);
2443 return;
2444 }
2445 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2446 cpu_fpr[rB(ctx->opcode)], 0, 63);
2447 if (unlikely(Rc(ctx->opcode))) {
2448 gen_set_cr1_from_fpscr(ctx);
2449 }
2450 }
2451 
/* fmrgew: merge even words — rD = { rA[hi32], rB[hi32] }. */
2452 static void gen_fmrgew(DisasContext *ctx)
2453 {
2454 TCGv_i64 b0;
2455 if (unlikely(!ctx->fpu_enabled)) {
2456 gen_exception(ctx, POWERPC_EXCP_FPU);
2457 return;
2458 }
2459 b0 = tcg_temp_new_i64();
2460 tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
2461 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2462 b0, 0, 32);
2463 tcg_temp_free_i64(b0);
2464 }
2465 
/* fmrgow: merge odd words — rD = { rA[lo32], rB[lo32] }. */
2466 static void gen_fmrgow(DisasContext *ctx)
2467 {
2468 if (unlikely(!ctx->fpu_enabled)) {
2469 gen_exception(ctx, POWERPC_EXCP_FPU);
2470 return;
2471 }
2472 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
2473 cpu_fpr[rB(ctx->opcode)],
2474 cpu_fpr[rA(ctx->opcode)],
2475 32, 32);
2476 }
2477
2478 /*** Floating-Point status & ctrl register ***/
2479
2480 /* mcrfs */
/* mcrfs: copy FPSCR field bfa into CR[crfD], then clear the copied
 * exception bits (sticky-read-clears semantics) via the store helper so
 * FEX/VX summary bits get recomputed. */
2481 static void gen_mcrfs(DisasContext *ctx)
2482 {
2483 TCGv tmp = tcg_temp_new();
2484 TCGv_i32 tmask;
2485 TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
2486 int bfa;
2487 int nibble;
2488 int shift;
2489 
2490 if (unlikely(!ctx->fpu_enabled)) {
2491 gen_exception(ctx, POWERPC_EXCP_FPU);
2492 return;
2493 }
2494 bfa = crfS(ctx->opcode);
2495 nibble = 7 - bfa;
2496 shift = 4 * nibble;
2497 tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
2498 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
2499 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2500 tcg_temp_free(tmp);
2501 tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
2502 /* Only the exception bits (including FX) should be cleared if read */
2503 tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
2504 /* FEX and VX need to be updated, so don't set fpscr directly */
2505 tmask = tcg_const_i32(1 << nibble);
2506 gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
2507 tcg_temp_free_i32(tmask);
2508 tcg_temp_free_i64(tnew_fpscr);
2509 }
2510
2511 /* mffs */
/* mffs: copy the FPSCR into FPR rD (zero-extended to 64 bits). */
2512 static void gen_mffs(DisasContext *ctx)
2513 {
2514 if (unlikely(!ctx->fpu_enabled)) {
2515 gen_exception(ctx, POWERPC_EXCP_FPU);
2516 return;
2517 }
2518 gen_reset_fpstatus();
2519 tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
2520 if (unlikely(Rc(ctx->opcode))) {
2521 gen_set_cr1_from_fpscr(ctx);
2522 }
2523 }
2524
2525 /* mtfsb0 */
/* mtfsb0: clear one FPSCR bit.  FEX and VX are summary bits computed
 * from the others, so writes to them are ignored. */
2526 static void gen_mtfsb0(DisasContext *ctx)
2527 {
2528 uint8_t crb;
2529 
2530 if (unlikely(!ctx->fpu_enabled)) {
2531 gen_exception(ctx, POWERPC_EXCP_FPU);
2532 return;
2533 }
2534 crb = 31 - crbD(ctx->opcode);
2535 gen_reset_fpstatus();
2536 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
2537 TCGv_i32 t0;
2538 /* NIP cannot be restored if the memory exception comes from an helper */
2539 gen_update_nip(ctx, ctx->nip - 4);
2540 t0 = tcg_const_i32(crb);
2541 gen_helper_fpscr_clrbit(cpu_env, t0);
2542 tcg_temp_free_i32(t0);
2543 }
2544 if (unlikely(Rc(ctx->opcode) != 0)) {
2545 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2546 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2547 }
2548 }
2549
2550 /* mtfsb1 */
/* mtfsb1: set one FPSCR bit.  Setting an enable/exception bit may make
 * an exception pending, hence the final float_check_status call. */
2551 static void gen_mtfsb1(DisasContext *ctx)
2552 {
2553 uint8_t crb;
2554 
2555 if (unlikely(!ctx->fpu_enabled)) {
2556 gen_exception(ctx, POWERPC_EXCP_FPU);
2557 return;
2558 }
2559 crb = 31 - crbD(ctx->opcode);
2560 gen_reset_fpstatus();
2561 /* XXX: we pretend we can only do IEEE floating-point computations */
2562 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
2563 TCGv_i32 t0;
2564 /* NIP cannot be restored if the memory exception comes from an helper */
2565 gen_update_nip(ctx, ctx->nip - 4);
2566 t0 = tcg_const_i32(crb);
2567 gen_helper_fpscr_setbit(cpu_env, t0);
2568 tcg_temp_free_i32(t0);
2569 }
2570 if (unlikely(Rc(ctx->opcode) != 0)) {
2571 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2572 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2573 }
2574 /* We can raise a deferred exception */
2575 gen_helper_float_check_status(cpu_env);
2576 }
2577
2578 /* mtfsf */
/* mtfsf: move rB's low word (or doubleword fields with L=1/W) into the
 * FPSCR under the FLM field mask. */
2579 static void gen_mtfsf(DisasContext *ctx)
2580 {
2581 TCGv_i32 t0;
2582 int flm, l, w;
2583 
2584 if (unlikely(!ctx->fpu_enabled)) {
2585 gen_exception(ctx, POWERPC_EXCP_FPU);
2586 return;
2587 }
2588 flm = FPFLM(ctx->opcode);
2589 l = FPL(ctx->opcode);
2590 w = FPW(ctx->opcode);
/* w and !(...) are both 0/1, so bitwise & acts as logical AND here. */
2591 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
2592 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2593 return;
2594 }
2595 /* NIP cannot be restored if the memory exception comes from an helper */
2596 gen_update_nip(ctx, ctx->nip - 4);
2597 gen_reset_fpstatus();
2598 if (l) {
2599 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
2600 } else {
2601 t0 = tcg_const_i32(flm << (w * 8));
2602 }
2603 gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
2604 tcg_temp_free_i32(t0);
2605 if (unlikely(Rc(ctx->opcode) != 0)) {
2606 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2607 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2608 }
2609 /* We can raise a deferred exception */
2610 gen_helper_float_check_status(cpu_env);
2611 }
2612
2613 /* mtfsfi */
/* mtfsfi: store the 4-bit immediate into FPSCR field bf (word selected
 * by W), through the helper so FEX/VX stay consistent. */
2614 static void gen_mtfsfi(DisasContext *ctx)
2615 {
2616 int bf, sh, w;
2617 TCGv_i64 t0;
2618 TCGv_i32 t1;
2619 
2620 if (unlikely(!ctx->fpu_enabled)) {
2621 gen_exception(ctx, POWERPC_EXCP_FPU);
2622 return;
2623 }
2624 w = FPW(ctx->opcode);
2625 bf = FPBF(ctx->opcode);
/* w and !(...) are both 0/1, so bitwise & acts as logical AND here. */
2626 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
2627 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2628 return;
2629 }
2630 sh = (8 * w) + 7 - bf;
2631 /* NIP cannot be restored if the memory exception comes from an helper */
2632 gen_update_nip(ctx, ctx->nip - 4);
2633 gen_reset_fpstatus();
2634 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
2635 t1 = tcg_const_i32(1 << sh);
2636 gen_helper_store_fpscr(cpu_env, t0, t1);
2637 tcg_temp_free_i64(t0);
2638 tcg_temp_free_i32(t1);
2639 if (unlikely(Rc(ctx->opcode) != 0)) {
2640 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2641 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2642 }
2643 /* We can raise a deferred exception */
2644 gen_helper_float_check_status(cpu_env);
2645 }
2646
2647 /*** Addressing modes ***/
2648 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
/* EA = (rA|0) + SIMM, with maskl clearing low displacement bits for the
 * DS/DQ forms; result truncated to 32 bits in narrow (32-bit) mode. */
2649 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2650 target_long maskl)
2651 {
2652 target_long simm = SIMM(ctx->opcode);
2653 
2654 simm &= ~maskl;
2655 if (rA(ctx->opcode) == 0) {
2656 if (NARROW_MODE(ctx)) {
2657 simm = (uint32_t)simm;
2658 }
2659 tcg_gen_movi_tl(EA, simm);
2660 } else if (likely(simm != 0)) {
2661 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2662 if (NARROW_MODE(ctx)) {
2663 tcg_gen_ext32u_tl(EA, EA);
2664 }
2665 } else {
2666 if (NARROW_MODE(ctx)) {
2667 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2668 } else {
2669 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2670 }
2671 }
2672 }
2673 
/* EA = (rA|0) + rB, truncated to 32 bits in narrow mode. */
2674 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2675 {
2676 if (rA(ctx->opcode) == 0) {
2677 if (NARROW_MODE(ctx)) {
2678 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2679 } else {
2680 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2681 }
2682 } else {
2683 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2684 if (NARROW_MODE(ctx)) {
2685 tcg_gen_ext32u_tl(EA, EA);
2686 }
2687 }
2688 }
2689 
/* EA = (rA|0), truncated to 32 bits in narrow mode. */
2690 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2691 {
2692 if (rA(ctx->opcode) == 0) {
2693 tcg_gen_movi_tl(EA, 0);
2694 } else if (NARROW_MODE(ctx)) {
2695 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2696 } else {
2697 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2698 }
2699 }
2700 
/* ret = arg1 + val, truncated to 32 bits in narrow mode. */
2701 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2702 target_long val)
2703 {
2704 tcg_gen_addi_tl(ret, arg1, val);
2705 if (NARROW_MODE(ctx)) {
2706 tcg_gen_ext32u_tl(ret, ret);
2707 }
2708 }
2709
/* Emit a runtime alignment check: raise POWERPC_EXCP_ALIGN if any of the
 * mask bits of EA are set, otherwise fall through to label l1. */
2710 static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
2711 {
2712 TCGLabel *l1 = gen_new_label();
2713 TCGv t0 = tcg_temp_new();
2714 TCGv_i32 t1, t2;
2715 /* NIP cannot be restored if the memory exception comes from an helper */
2716 gen_update_nip(ctx, ctx->nip - 4);
2717 tcg_gen_andi_tl(t0, EA, mask);
2718 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
2719 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
2720 t2 = tcg_const_i32(0);
2721 gen_helper_raise_exception_err(cpu_env, t1, t2);
2722 tcg_temp_free_i32(t1);
2723 tcg_temp_free_i32(t2);
2724 gen_set_label(l1);
2725 tcg_temp_free(t0);
2726 }
2727
2728 /*** Integer load ***/
/* Load a zero-extended byte; single bytes need no byteswap handling. */
2729 static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2730 {
2731 tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
2732 }
2733
2734 static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2735 {
2736 TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
2737 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2738 }
2739
2740 static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2741 {
2742 TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
2743 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2744 }
2745
2746 static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2747 {
2748 TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
2749 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2750 }
2751
2752 static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2753 {
2754 TCGv tmp = tcg_temp_new();
2755 gen_qemu_ld32u(ctx, tmp, addr);
2756 tcg_gen_extu_tl_i64(val, tmp);
2757 tcg_temp_free(tmp);
2758 }
2759
2760 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2761 {
2762 TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
2763 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
2764 }
2765
2766 static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2767 {
2768 TCGv tmp = tcg_temp_new();
2769 gen_qemu_ld32s(ctx, tmp, addr);
2770 tcg_gen_ext_tl_i64(val, tmp);
2771 tcg_temp_free(tmp);
2772 }
2773
2774 static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2775 {
2776 TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
2777 tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
2778 }
2779
/* Store a byte; single bytes need no byteswap handling. */
2780 static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
2781 {
2782 tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
2783 }
2784
2785 static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
2786 {
2787 TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
2788 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
2789 }
2790
2791 static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
2792 {
2793 TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
2794 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
2795 }
2796
2797 static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
2798 {
2799 TCGv tmp = tcg_temp_new();
2800 tcg_gen_trunc_i64_tl(tmp, val);
2801 gen_qemu_st32(ctx, tmp, addr);
2802 tcg_temp_free(tmp);
2803 }
2804
2805 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2806 {
2807 TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
2808 tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
2809 }
2810
/* D-form load: EA = (rA|0) + SIMM, result into rD. */
2811 #define GEN_LD(name, ldop, opc, type) \
2812 static void glue(gen_, name)(DisasContext *ctx) \
2813 { \
2814 TCGv EA; \
2815 gen_set_access_type(ctx, ACCESS_INT); \
2816 EA = tcg_temp_new(); \
2817 gen_addr_imm_index(ctx, EA, 0); \
2818 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2819 tcg_temp_free(EA); \
2820 }
2821 
/* D-form load with update: rA must be non-zero and differ from rD;
 * rA is written back with the effective address.  DS-form (PPC_64B)
 * masks the low two displacement bits. */
2822 #define GEN_LDU(name, ldop, opc, type) \
2823 static void glue(gen_, name##u)(DisasContext *ctx) \
2824 { \
2825 TCGv EA; \
2826 if (unlikely(rA(ctx->opcode) == 0 || \
2827 rA(ctx->opcode) == rD(ctx->opcode))) { \
2828 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2829 return; \
2830 } \
2831 gen_set_access_type(ctx, ACCESS_INT); \
2832 EA = tcg_temp_new(); \
2833 if (type == PPC_64B) \
2834 gen_addr_imm_index(ctx, EA, 0x03); \
2835 else \
2836 gen_addr_imm_index(ctx, EA, 0); \
2837 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2838 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2839 tcg_temp_free(EA); \
2840 }
2841 
/* X-form load with update: EA = (rA) + rB, rA updated with EA. */
2842 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2843 static void glue(gen_, name##ux)(DisasContext *ctx) \
2844 { \
2845 TCGv EA; \
2846 if (unlikely(rA(ctx->opcode) == 0 || \
2847 rA(ctx->opcode) == rD(ctx->opcode))) { \
2848 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2849 return; \
2850 } \
2851 gen_set_access_type(ctx, ACCESS_INT); \
2852 EA = tcg_temp_new(); \
2853 gen_addr_reg_index(ctx, EA); \
2854 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2855 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2856 tcg_temp_free(EA); \
2857 }
2858 
/* X-form indexed load: EA = (rA|0) + rB. */
2859 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2) \
2860 static void glue(gen_, name##x)(DisasContext *ctx) \
2861 { \
2862 TCGv EA; \
2863 gen_set_access_type(ctx, ACCESS_INT); \
2864 EA = tcg_temp_new(); \
2865 gen_addr_reg_index(ctx, EA); \
2866 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2867 tcg_temp_free(EA); \
2868 }
2869 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2870 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)
2871 
/* Instantiate the full family: name, name"u", name"ux", name"x". */
2872 #define GEN_LDS(name, ldop, op, type) \
2873 GEN_LD(name, ldop, op | 0x20, type); \
2874 GEN_LDU(name, ldop, op | 0x21, type); \
2875 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2876 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2877
2878 /* lbz lbzu lbzux lbzx */
2879 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2880 /* lha lhau lhaux lhax */
2881 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2882 /* lhz lhzu lhzux lhzx */
2883 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2884 /* lwz lwzu lwzux lwzx */
2885 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2886 #if defined(TARGET_PPC64)
2887 /* lwaux */
2888 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2889 /* lwax */
2890 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2891 /* ldux */
2892 GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
2893 /* ldx */
2894 GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2895
/* DS-form ld/ldu/lwa: opcode bits 0-1 select the variant; the update
 * forms require rA != 0 and rA != rD. */
2896 static void gen_ld(DisasContext *ctx)
2897 {
2898 TCGv EA;
2899 if (Rc(ctx->opcode)) {
2900 if (unlikely(rA(ctx->opcode) == 0 ||
2901 rA(ctx->opcode) == rD(ctx->opcode))) {
2902 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2903 return;
2904 }
2905 }
2906 gen_set_access_type(ctx, ACCESS_INT);
2907 EA = tcg_temp_new();
2908 gen_addr_imm_index(ctx, EA, 0x03);
2909 if (ctx->opcode & 0x02) {
2910 /* lwa (lwau is undefined) */
2911 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2912 } else {
2913 /* ld - ldu */
2914 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2915 }
2916 if (Rc(ctx->opcode))
2917 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2918 tcg_temp_free(EA);
2919 }
2920
2921 /* lq */
/* Quadword load into the even/odd GPR pair rd/rd+1. */
2922 static void gen_lq(DisasContext *ctx)
2923 {
2924 int ra, rd;
2925 TCGv EA;
2926 
2927 /* lq is a legal user mode instruction starting in ISA 2.07 */
/* NOTE: both flags test PPC2_LSQ_ISA207 — LE support and user-mode
 * legality were introduced by the same ISA level. */
2928 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2929 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2930 
2931 if (!legal_in_user_mode && ctx->pr) {
2932 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2933 return;
2934 }
2935 
2936 if (!le_is_supported && ctx->le_mode) {
2937 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2938 return;
2939 }
2940 
2941 ra = rA(ctx->opcode);
2942 rd = rD(ctx->opcode);
/* rd must be even and must not overlap the base register. */
2943 if (unlikely((rd & 1) || rd == ra)) {
2944 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2945 return;
2946 }
2947 
2948 gen_set_access_type(ctx, ACCESS_INT);
2949 EA = tcg_temp_new();
2950 gen_addr_imm_index(ctx, EA, 0x0F);
2951 
2952 /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
2953 64-bit byteswap already. */
2954 if (unlikely(ctx->le_mode)) {
2955 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2956 gen_addr_add(ctx, EA, EA, 8);
2957 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2958 } else {
2959 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2960 gen_addr_add(ctx, EA, EA, 8);
2961 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2962 }
2963 tcg_temp_free(EA);
2964 }
2965 #endif
2966
2967 /*** Integer store ***/
/* D-form store: EA = (rA|0) + SIMM, value from rS. */
2968 #define GEN_ST(name, stop, opc, type) \
2969 static void glue(gen_, name)(DisasContext *ctx) \
2970 { \
2971 TCGv EA; \
2972 gen_set_access_type(ctx, ACCESS_INT); \
2973 EA = tcg_temp_new(); \
2974 gen_addr_imm_index(ctx, EA, 0); \
2975 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2976 tcg_temp_free(EA); \
2977 }
2978 
/* D-form store with update; rA must be non-zero and receives EA.
 * NOTE: the generated name is gen_<stop>u (e.g. gen_st8u), not
 * gen_<name>u — the opcode tables reference it the same way. */
2979 #define GEN_STU(name, stop, opc, type) \
2980 static void glue(gen_, stop##u)(DisasContext *ctx) \
2981 { \
2982 TCGv EA; \
2983 if (unlikely(rA(ctx->opcode) == 0)) { \
2984 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2985 return; \
2986 } \
2987 gen_set_access_type(ctx, ACCESS_INT); \
2988 EA = tcg_temp_new(); \
2989 if (type == PPC_64B) \
2990 gen_addr_imm_index(ctx, EA, 0x03); \
2991 else \
2992 gen_addr_imm_index(ctx, EA, 0); \
2993 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2994 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2995 tcg_temp_free(EA); \
2996 }
2997 
/* X-form store with update: EA = (rA) + rB, rA updated with EA. */
2998 #define GEN_STUX(name, stop, opc2, opc3, type) \
2999 static void glue(gen_, name##ux)(DisasContext *ctx) \
3000 { \
3001 TCGv EA; \
3002 if (unlikely(rA(ctx->opcode) == 0)) { \
3003 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
3004 return; \
3005 } \
3006 gen_set_access_type(ctx, ACCESS_INT); \
3007 EA = tcg_temp_new(); \
3008 gen_addr_reg_index(ctx, EA); \
3009 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
3010 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
3011 tcg_temp_free(EA); \
3012 }
3013 
/* X-form indexed store: EA = (rA|0) + rB. */
3014 #define GEN_STX_E(name, stop, opc2, opc3, type, type2) \
3015 static void glue(gen_, name##x)(DisasContext *ctx) \
3016 { \
3017 TCGv EA; \
3018 gen_set_access_type(ctx, ACCESS_INT); \
3019 EA = tcg_temp_new(); \
3020 gen_addr_reg_index(ctx, EA); \
3021 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
3022 tcg_temp_free(EA); \
3023 }
3024 #define GEN_STX(name, stop, opc2, opc3, type) \
3025 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)
3026 
/* Instantiate the full family: name, name"u", name"ux", name"x". */
3027 #define GEN_STS(name, stop, op, type) \
3028 GEN_ST(name, stop, op | 0x20, type); \
3029 GEN_STU(name, stop, op | 0x21, type); \
3030 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
3031 GEN_STX(name, stop, 0x17, op | 0x00, type)
3032
3033 /* stb stbu stbux stbx */
3034 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
3035 /* sth sthu sthux sthx */
3036 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
3037 /* stw stwu stwux stwx */
3038 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
3039 #if defined(TARGET_PPC64)
/* stdux / stdx */
3040 GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
3041 GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
3042
3043 static void gen_std(DisasContext *ctx)
3044 {
3045 int rs;
3046 TCGv EA;
3047
3048 rs = rS(ctx->opcode);
3049 if ((ctx->opcode & 0x3) == 0x2) { /* stq */
3050 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
3051 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
3052
3053 if (!(ctx->insns_flags & PPC_64BX)) {
3054 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3055 }
3056
3057 if (!legal_in_user_mode && ctx->pr) {
3058 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
3059 return;
3060 }
3061
3062 if (!le_is_supported && ctx->le_mode) {
3063 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
3064 return;
3065 }
3066
3067 if (unlikely(rs & 1)) {
3068 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3069 return;
3070 }
3071 gen_set_access_type(ctx, ACCESS_INT);
3072 EA = tcg_temp_new();
3073 gen_addr_imm_index(ctx, EA, 0x03);
3074
3075 /* We only need to swap high and low halves. gen_qemu_st64 does
3076 necessary 64-bit byteswap already. */
3077 if (unlikely(ctx->le_mode)) {
3078 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
3079 gen_addr_add(ctx, EA, EA, 8);
3080 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3081 } else {
3082 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3083 gen_addr_add(ctx, EA, EA, 8);
3084 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
3085 }
3086 tcg_temp_free(EA);
3087 } else {
3088 /* std / stdu*/
3089 if (Rc(ctx->opcode)) {
3090 if (unlikely(rA(ctx->opcode) == 0)) {
3091 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3092 return;
3093 }
3094 }
3095 gen_set_access_type(ctx, ACCESS_INT);
3096 EA = tcg_temp_new();
3097 gen_addr_imm_index(ctx, EA, 0x03);
3098 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
3099 if (Rc(ctx->opcode))
3100 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
3101 tcg_temp_free(EA);
3102 }
3103 }
3104 #endif
3105 /*** Integer load and store with byte reverse ***/
3106
3107 /* lhbrx */
3108 static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
3109 {
3110 TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3111 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
3112 }
3113 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
3114
3115 /* lwbrx */
3116 static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
3117 {
3118 TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3119 tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
3120 }
3121 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
3122
3123 #if defined(TARGET_PPC64)
3124 /* ldbrx */
3125 static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
3126 {
3127 TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3128 tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
3129 }
3130 GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
3131 #endif /* TARGET_PPC64 */
3132
3133 /* sthbrx */
3134 static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
3135 {
3136 TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
3137 tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
3138 }
3139 GEN_STX(sthbr, st16r