PPC: E500: Implement msgclr
[qemu.git] / target-ppc / translate.c
1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25
26 #include "cpu.h"
27 #include "disas.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30 #include "host-utils.h"
31
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
35
36 #define CPU_SINGLE_STEP 0x1
37 #define CPU_BRANCH_STEP 0x2
38 #define GDBSTUB_SINGLE_STEP 0x4
39
40 /* Include definitions for instruction classes and implementation flags */
41 //#define PPC_DEBUG_DISAS
42 //#define DO_PPC_STATISTICS
43
44 #ifdef PPC_DEBUG_DISAS
45 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
46 #else
47 # define LOG_DISAS(...) do { } while (0)
48 #endif
49 /*****************************************************************************/
50 /* Code translation helpers */
51
52 /* global register indexes */
53 static TCGv_ptr cpu_env;
54 static char cpu_reg_names[10*3 + 22*4 /* GPR */
55 #if !defined(TARGET_PPC64)
56 + 10*4 + 22*5 /* SPE GPRh */
57 #endif
58 + 10*4 + 22*5 /* FPR */
59 + 2*(10*6 + 22*7) /* AVRh, AVRl */
60 + 8*5 /* CRF */];
61 static TCGv cpu_gpr[32];
62 #if !defined(TARGET_PPC64)
63 static TCGv cpu_gprh[32];
64 #endif
65 static TCGv_i64 cpu_fpr[32];
66 static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
67 static TCGv_i32 cpu_crf[8];
68 static TCGv cpu_nip;
69 static TCGv cpu_msr;
70 static TCGv cpu_ctr;
71 static TCGv cpu_lr;
72 #if defined(TARGET_PPC64)
73 static TCGv cpu_cfar;
74 #endif
75 static TCGv cpu_xer;
76 static TCGv cpu_reserve;
77 static TCGv_i32 cpu_fpscr;
78 static TCGv_i32 cpu_access_type;
79
80 #include "gen-icount.h"
81
82 void ppc_translate_init(void)
83 {
84 int i;
85 char* p;
86 size_t cpu_reg_names_size;
87 static int done_init = 0;
88
89 if (done_init)
90 return;
91
92 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
93
94 p = cpu_reg_names;
95 cpu_reg_names_size = sizeof(cpu_reg_names);
96
97 for (i = 0; i < 8; i++) {
98 snprintf(p, cpu_reg_names_size, "crf%d", i);
99 cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
100 offsetof(CPUState, crf[i]), p);
101 p += 5;
102 cpu_reg_names_size -= 5;
103 }
104
105 for (i = 0; i < 32; i++) {
106 snprintf(p, cpu_reg_names_size, "r%d", i);
107 cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
108 offsetof(CPUState, gpr[i]), p);
109 p += (i < 10) ? 3 : 4;
110 cpu_reg_names_size -= (i < 10) ? 3 : 4;
111 #if !defined(TARGET_PPC64)
112 snprintf(p, cpu_reg_names_size, "r%dH", i);
113 cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, gprh[i]), p);
115 p += (i < 10) ? 4 : 5;
116 cpu_reg_names_size -= (i < 10) ? 4 : 5;
117 #endif
118
119 snprintf(p, cpu_reg_names_size, "fp%d", i);
120 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
121 offsetof(CPUState, fpr[i]), p);
122 p += (i < 10) ? 4 : 5;
123 cpu_reg_names_size -= (i < 10) ? 4 : 5;
124
125 snprintf(p, cpu_reg_names_size, "avr%dH", i);
126 #ifdef HOST_WORDS_BIGENDIAN
127 cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
128 offsetof(CPUState, avr[i].u64[0]), p);
129 #else
130 cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
131 offsetof(CPUState, avr[i].u64[1]), p);
132 #endif
133 p += (i < 10) ? 6 : 7;
134 cpu_reg_names_size -= (i < 10) ? 6 : 7;
135
136 snprintf(p, cpu_reg_names_size, "avr%dL", i);
137 #ifdef HOST_WORDS_BIGENDIAN
138 cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
139 offsetof(CPUState, avr[i].u64[1]), p);
140 #else
141 cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
142 offsetof(CPUState, avr[i].u64[0]), p);
143 #endif
144 p += (i < 10) ? 6 : 7;
145 cpu_reg_names_size -= (i < 10) ? 6 : 7;
146 }
147
148 cpu_nip = tcg_global_mem_new(TCG_AREG0,
149 offsetof(CPUState, nip), "nip");
150
151 cpu_msr = tcg_global_mem_new(TCG_AREG0,
152 offsetof(CPUState, msr), "msr");
153
154 cpu_ctr = tcg_global_mem_new(TCG_AREG0,
155 offsetof(CPUState, ctr), "ctr");
156
157 cpu_lr = tcg_global_mem_new(TCG_AREG0,
158 offsetof(CPUState, lr), "lr");
159
160 #if defined(TARGET_PPC64)
161 cpu_cfar = tcg_global_mem_new(TCG_AREG0,
162 offsetof(CPUState, cfar), "cfar");
163 #endif
164
165 cpu_xer = tcg_global_mem_new(TCG_AREG0,
166 offsetof(CPUState, xer), "xer");
167
168 cpu_reserve = tcg_global_mem_new(TCG_AREG0,
169 offsetof(CPUState, reserve_addr),
170 "reserve_addr");
171
172 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
173 offsetof(CPUState, fpscr), "fpscr");
174
175 cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
176 offsetof(CPUState, access_type), "access_type");
177
178 /* register helpers */
179 #define GEN_HELPER 2
180 #include "helper.h"
181
182 done_init = 1;
183 }
184
185 /* internal defines */
186 typedef struct DisasContext {
187 struct TranslationBlock *tb;
188 target_ulong nip;
189 uint32_t opcode;
190 uint32_t exception;
191 /* Routine used to access memory */
192 int mem_idx;
193 int access_type;
194 /* Translation flags */
195 int le_mode;
196 #if defined(TARGET_PPC64)
197 int sf_mode;
198 int has_cfar;
199 #endif
200 int fpu_enabled;
201 int altivec_enabled;
202 int spe_enabled;
203 ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
204 int singlestep_enabled;
205 } DisasContext;
206
207 struct opc_handler_t {
208 /* invalid bits for instruction 1 (Rc(opcode) == 0) */
209 uint32_t inval1;
210 /* invalid bits for instruction 2 (Rc(opcode) == 1) */
211 uint32_t inval2;
212 /* instruction type */
213 uint64_t type;
214 /* extended instruction type */
215 uint64_t type2;
216 /* handler */
217 void (*handler)(DisasContext *ctx);
218 #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
219 const char *oname;
220 #endif
221 #if defined(DO_PPC_STATISTICS)
222 uint64_t count;
223 #endif
224 };
225
226 static inline void gen_reset_fpstatus(void)
227 {
228 gen_helper_reset_fpstatus();
229 }
230
231 static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
232 {
233 TCGv_i32 t0 = tcg_temp_new_i32();
234
235 if (set_fprf != 0) {
236 /* This case might be optimized later */
237 tcg_gen_movi_i32(t0, 1);
238 gen_helper_compute_fprf(t0, arg, t0);
239 if (unlikely(set_rc)) {
240 tcg_gen_mov_i32(cpu_crf[1], t0);
241 }
242 gen_helper_float_check_status();
243 } else if (unlikely(set_rc)) {
244 /* We always need to compute fpcc */
245 tcg_gen_movi_i32(t0, 0);
246 gen_helper_compute_fprf(t0, arg, t0);
247 tcg_gen_mov_i32(cpu_crf[1], t0);
248 }
249
250 tcg_temp_free_i32(t0);
251 }
252
253 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
254 {
255 if (ctx->access_type != access_type) {
256 tcg_gen_movi_i32(cpu_access_type, access_type);
257 ctx->access_type = access_type;
258 }
259 }
260
261 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
262 {
263 #if defined(TARGET_PPC64)
264 if (ctx->sf_mode)
265 tcg_gen_movi_tl(cpu_nip, nip);
266 else
267 #endif
268 tcg_gen_movi_tl(cpu_nip, (uint32_t)nip);
269 }
270
271 static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
272 {
273 TCGv_i32 t0, t1;
274 if (ctx->exception == POWERPC_EXCP_NONE) {
275 gen_update_nip(ctx, ctx->nip);
276 }
277 t0 = tcg_const_i32(excp);
278 t1 = tcg_const_i32(error);
279 gen_helper_raise_exception_err(t0, t1);
280 tcg_temp_free_i32(t0);
281 tcg_temp_free_i32(t1);
282 ctx->exception = (excp);
283 }
284
285 static inline void gen_exception(DisasContext *ctx, uint32_t excp)
286 {
287 TCGv_i32 t0;
288 if (ctx->exception == POWERPC_EXCP_NONE) {
289 gen_update_nip(ctx, ctx->nip);
290 }
291 t0 = tcg_const_i32(excp);
292 gen_helper_raise_exception(t0);
293 tcg_temp_free_i32(t0);
294 ctx->exception = (excp);
295 }
296
297 static inline void gen_debug_exception(DisasContext *ctx)
298 {
299 TCGv_i32 t0;
300
301 if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
302 (ctx->exception != POWERPC_EXCP_SYNC)) {
303 gen_update_nip(ctx, ctx->nip);
304 }
305 t0 = tcg_const_i32(EXCP_DEBUG);
306 gen_helper_raise_exception(t0);
307 tcg_temp_free_i32(t0);
308 }
309
310 static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
311 {
312 gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
313 }
314
315 /* Stop translation */
316 static inline void gen_stop_exception(DisasContext *ctx)
317 {
318 gen_update_nip(ctx, ctx->nip);
319 ctx->exception = POWERPC_EXCP_STOP;
320 }
321
322 /* No need to update nip here, as execution flow will change */
323 static inline void gen_sync_exception(DisasContext *ctx)
324 {
325 ctx->exception = POWERPC_EXCP_SYNC;
326 }
327
328 #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
329 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)
330
331 #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
332 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)
333
334 #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
335 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
336
337 #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
338 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
339
340 typedef struct opcode_t {
341 unsigned char opc1, opc2, opc3;
342 #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
343 unsigned char pad[5];
344 #else
345 unsigned char pad[1];
346 #endif
347 opc_handler_t handler;
348 const char *oname;
349 } opcode_t;
350
351 /*****************************************************************************/
352 /*** Instruction decoding ***/
353 #define EXTRACT_HELPER(name, shift, nb) \
354 static inline uint32_t name(uint32_t opcode) \
355 { \
356 return (opcode >> (shift)) & ((1 << (nb)) - 1); \
357 }
358
359 #define EXTRACT_SHELPER(name, shift, nb) \
360 static inline int32_t name(uint32_t opcode) \
361 { \
362 return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
363 }
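/* A minimal illustration (not part of the original source): what
 * EXTRACT_SHELPER expands to for the 16-bit SIMM field, showing how the
 * int16_t cast performs the sign extension. */
#if 0
#include <stdint.h>
#include <assert.h>

static inline int32_t simm_example(uint32_t opcode)
{
    /* Same expansion as EXTRACT_SHELPER(SIMM, 0, 16) below. */
    return (int16_t)((opcode >> 0) & ((1 << 16) - 1));
}

static void check_simm(void)
{
    assert(simm_example(0x3800FFFFu) == -1);     /* addi rD,0,-1 */
    assert(simm_example(0x38007FFFu) == 32767);  /* addi rD,0,32767 */
}
#endif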
364
365 /* Opcode part 1 */
366 EXTRACT_HELPER(opc1, 26, 6);
367 /* Opcode part 2 */
368 EXTRACT_HELPER(opc2, 1, 5);
369 /* Opcode part 3 */
370 EXTRACT_HELPER(opc3, 6, 5);
371 /* Update Cr0 flags */
372 EXTRACT_HELPER(Rc, 0, 1);
373 /* Destination */
374 EXTRACT_HELPER(rD, 21, 5);
375 /* Source */
376 EXTRACT_HELPER(rS, 21, 5);
377 /* First operand */
378 EXTRACT_HELPER(rA, 16, 5);
379 /* Second operand */
380 EXTRACT_HELPER(rB, 11, 5);
381 /* Third operand */
382 EXTRACT_HELPER(rC, 6, 5);
383 /*** Get CRn ***/
384 EXTRACT_HELPER(crfD, 23, 3);
385 EXTRACT_HELPER(crfS, 18, 3);
386 EXTRACT_HELPER(crbD, 21, 5);
387 EXTRACT_HELPER(crbA, 16, 5);
388 EXTRACT_HELPER(crbB, 11, 5);
389 /* SPR / TBL */
390 EXTRACT_HELPER(_SPR, 11, 10);
391 static inline uint32_t SPR(uint32_t opcode)
392 {
393 uint32_t sprn = _SPR(opcode);
394
395 return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
396 }
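/* A minimal illustration (not part of the original source): the 10-bit SPR
 * field in mfspr/mtspr carries the SPR number with its two 5-bit halves
 * swapped, and SPR() above swaps them back.  For example, LR is SPR 8,
 * encoded in the instruction field as 8 << 5 = 0x100. */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t spr_swap(uint32_t sprn)
{
    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}

static void check_spr(void)
{
    assert(spr_swap(0x100) == 8);    /* LR */
    assert(spr_swap(0x208) == 272);  /* SPRG0 */
}
#endif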
397 /*** Get constants ***/
398 EXTRACT_HELPER(IMM, 12, 8);
399 /* 16 bits signed immediate value */
400 EXTRACT_SHELPER(SIMM, 0, 16);
401 /* 16 bits unsigned immediate value */
402 EXTRACT_HELPER(UIMM, 0, 16);
403 /* 5 bits signed immediate value */
404 EXTRACT_HELPER(SIMM5, 16, 5);
405 /* 5 bits unsigned immediate value */
406 EXTRACT_HELPER(UIMM5, 16, 5);
407 /* Bit count */
408 EXTRACT_HELPER(NB, 11, 5);
409 /* Shift count */
410 EXTRACT_HELPER(SH, 11, 5);
411 /* Vector shift count */
412 EXTRACT_HELPER(VSH, 6, 4);
413 /* Mask start */
414 EXTRACT_HELPER(MB, 6, 5);
415 /* Mask end */
416 EXTRACT_HELPER(ME, 1, 5);
417 /* Trap operand */
418 EXTRACT_HELPER(TO, 21, 5);
419
420 EXTRACT_HELPER(CRM, 12, 8);
421 EXTRACT_HELPER(FM, 17, 8);
422 EXTRACT_HELPER(SR, 16, 4);
423 EXTRACT_HELPER(FPIMM, 12, 4);
424
425 /*** Jump target decoding ***/
426 /* Displacement */
427 EXTRACT_SHELPER(d, 0, 16);
428 /* Immediate address */
429 static inline target_ulong LI(uint32_t opcode)
430 {
431 return (opcode >> 0) & 0x03FFFFFC;
432 }
433
434 static inline uint32_t BD(uint32_t opcode)
435 {
436 return (opcode >> 0) & 0xFFFC;
437 }
438
439 EXTRACT_HELPER(BO, 21, 5);
440 EXTRACT_HELPER(BI, 16, 5);
441 /* Absolute/relative address */
442 EXTRACT_HELPER(AA, 1, 1);
443 /* Link */
444 EXTRACT_HELPER(LK, 0, 1);
445
446 /* Create a mask between <start> and <end> bits */
447 static inline target_ulong MASK(uint32_t start, uint32_t end)
448 {
449 target_ulong ret;
450
451 #if defined(TARGET_PPC64)
452 if (likely(start == 0)) {
453 ret = UINT64_MAX << (63 - end);
454 } else if (likely(end == 63)) {
455 ret = UINT64_MAX >> start;
456 }
457 #else
458 if (likely(start == 0)) {
459 ret = UINT32_MAX << (31 - end);
460 } else if (likely(end == 31)) {
461 ret = UINT32_MAX >> start;
462 }
463 #endif
464 else {
465 ret = (((target_ulong)(-1ULL)) >> (start)) ^
466 (((target_ulong)(-1ULL) >> (end)) >> 1);
467 if (unlikely(start > end))
468 return ~ret;
469 }
470
471 return ret;
472 }
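/* A minimal illustration (not part of the original source): a 32-bit model
 * of MASK() with a few checks.  PPC numbers bit 0 as the most significant
 * bit, and a start > end pair yields the wrap-around (inverted) mask. */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t mask32(uint32_t start, uint32_t end)
{
    uint32_t ret;
    if (start == 0) {
        ret = UINT32_MAX << (31 - end);
    } else if (end == 31) {
        ret = UINT32_MAX >> start;
    } else {
        ret = (UINT32_MAX >> start) ^ ((UINT32_MAX >> end) >> 1);
        if (start > end) {
            return ~ret;
        }
    }
    return ret;
}

static void check_mask(void)
{
    assert(mask32(0, 31) == 0xFFFFFFFFu);  /* full word */
    assert(mask32(26, 31) == 0x0000003Fu); /* low six bits */
    assert(mask32(31, 0) == 0x80000001u);  /* wrap-around mask */
}
#endif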
473
474 /*****************************************************************************/
475 /* PowerPC instructions table */
476
477 #if defined(DO_PPC_STATISTICS)
478 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
479 { \
480 .opc1 = op1, \
481 .opc2 = op2, \
482 .opc3 = op3, \
483 .pad = { 0, }, \
484 .handler = { \
485 .inval1 = invl, \
486 .type = _typ, \
487 .type2 = _typ2, \
488 .handler = &gen_##name, \
489 .oname = stringify(name), \
490 }, \
491 .oname = stringify(name), \
492 }
493 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
494 { \
495 .opc1 = op1, \
496 .opc2 = op2, \
497 .opc3 = op3, \
498 .pad = { 0, }, \
499 .handler = { \
500 .inval1 = invl1, \
501 .inval2 = invl2, \
502 .type = _typ, \
503 .type2 = _typ2, \
504 .handler = &gen_##name, \
505 .oname = stringify(name), \
506 }, \
507 .oname = stringify(name), \
508 }
509 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
510 { \
511 .opc1 = op1, \
512 .opc2 = op2, \
513 .opc3 = op3, \
514 .pad = { 0, }, \
515 .handler = { \
516 .inval1 = invl, \
517 .type = _typ, \
518 .type2 = _typ2, \
519 .handler = &gen_##name, \
520 .oname = onam, \
521 }, \
522 .oname = onam, \
523 }
524 #else
525 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
526 { \
527 .opc1 = op1, \
528 .opc2 = op2, \
529 .opc3 = op3, \
530 .pad = { 0, }, \
531 .handler = { \
532 .inval1 = invl, \
533 .type = _typ, \
534 .type2 = _typ2, \
535 .handler = &gen_##name, \
536 }, \
537 .oname = stringify(name), \
538 }
539 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
540 { \
541 .opc1 = op1, \
542 .opc2 = op2, \
543 .opc3 = op3, \
544 .pad = { 0, }, \
545 .handler = { \
546 .inval1 = invl1, \
547 .inval2 = invl2, \
548 .type = _typ, \
549 .type2 = _typ2, \
550 .handler = &gen_##name, \
551 }, \
552 .oname = stringify(name), \
553 }
554 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
555 { \
556 .opc1 = op1, \
557 .opc2 = op2, \
558 .opc3 = op3, \
559 .pad = { 0, }, \
560 .handler = { \
561 .inval1 = invl, \
562 .type = _typ, \
563 .type2 = _typ2, \
564 .handler = &gen_##name, \
565 }, \
566 .oname = onam, \
567 }
568 #endif
569
570 /* SPR load/store helpers */
571 static inline void gen_load_spr(TCGv t, int reg)
572 {
573 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
574 }
575
576 static inline void gen_store_spr(int reg, TCGv t)
577 {
578 tcg_gen_st_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
579 }
580
581 /* Invalid instruction */
582 static void gen_invalid(DisasContext *ctx)
583 {
584 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
585 }
586
587 static opc_handler_t invalid_handler = {
588 .inval1 = 0xFFFFFFFF,
589 .inval2 = 0xFFFFFFFF,
590 .type = PPC_NONE,
591 .type2 = PPC_NONE,
592 .handler = gen_invalid,
593 };
594
595 /*** Integer comparison ***/
596
597 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
598 {
599 int l1, l2, l3;
600
601 tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_xer);
602 tcg_gen_shri_i32(cpu_crf[crf], cpu_crf[crf], XER_SO);
603 tcg_gen_andi_i32(cpu_crf[crf], cpu_crf[crf], 1);
604
605 l1 = gen_new_label();
606 l2 = gen_new_label();
607 l3 = gen_new_label();
608 if (s) {
609 tcg_gen_brcond_tl(TCG_COND_LT, arg0, arg1, l1);
610 tcg_gen_brcond_tl(TCG_COND_GT, arg0, arg1, l2);
611 } else {
612 tcg_gen_brcond_tl(TCG_COND_LTU, arg0, arg1, l1);
613 tcg_gen_brcond_tl(TCG_COND_GTU, arg0, arg1, l2);
614 }
615 tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_EQ);
616 tcg_gen_br(l3);
617 gen_set_label(l1);
618 tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_LT);
619 tcg_gen_br(l3);
620 gen_set_label(l2);
621 tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_GT);
622 gen_set_label(l3);
623 }
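/* A minimal illustration (not part of the original source): a C model of
 * the CR field value gen_op_cmp() computes, assuming the usual CRF_LT = 3,
 * CRF_GT = 2, CRF_EQ = 1, CRF_SO = 0 bit numbers from cpu.h.  XER[SO] is
 * copied first, then exactly one of LT/GT/EQ is set. */
#if 0
#include <stdint.h>

static uint32_t cr_for_signed_cmp(int32_t a, int32_t b, int so)
{
    uint32_t crf = so ? 1 : 0;   /* copy of XER[SO] into CRF_SO */
    if (a < b) {
        crf |= 1 << 3;           /* CRF_LT */
    } else if (a > b) {
        crf |= 1 << 2;           /* CRF_GT */
    } else {
        crf |= 1 << 1;           /* CRF_EQ */
    }
    return crf;
}
#endif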
624
625 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
626 {
627 TCGv t0 = tcg_const_local_tl(arg1);
628 gen_op_cmp(arg0, t0, s, crf);
629 tcg_temp_free(t0);
630 }
631
632 #if defined(TARGET_PPC64)
633 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
634 {
635 TCGv t0, t1;
636 t0 = tcg_temp_local_new();
637 t1 = tcg_temp_local_new();
638 if (s) {
639 tcg_gen_ext32s_tl(t0, arg0);
640 tcg_gen_ext32s_tl(t1, arg1);
641 } else {
642 tcg_gen_ext32u_tl(t0, arg0);
643 tcg_gen_ext32u_tl(t1, arg1);
644 }
645 gen_op_cmp(t0, t1, s, crf);
646 tcg_temp_free(t1);
647 tcg_temp_free(t0);
648 }
649
650 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
651 {
652 TCGv t0 = tcg_const_local_tl(arg1);
653 gen_op_cmp32(arg0, t0, s, crf);
654 tcg_temp_free(t0);
655 }
656 #endif
657
658 static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
659 {
660 #if defined(TARGET_PPC64)
661 if (!(ctx->sf_mode))
662 gen_op_cmpi32(reg, 0, 1, 0);
663 else
664 #endif
665 gen_op_cmpi(reg, 0, 1, 0);
666 }
667
668 /* cmp */
669 static void gen_cmp(DisasContext *ctx)
670 {
671 #if defined(TARGET_PPC64)
672 if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
673 gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
674 1, crfD(ctx->opcode));
675 else
676 #endif
677 gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
678 1, crfD(ctx->opcode));
679 }
680
681 /* cmpi */
682 static void gen_cmpi(DisasContext *ctx)
683 {
684 #if defined(TARGET_PPC64)
685 if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
686 gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
687 1, crfD(ctx->opcode));
688 else
689 #endif
690 gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
691 1, crfD(ctx->opcode));
692 }
693
694 /* cmpl */
695 static void gen_cmpl(DisasContext *ctx)
696 {
697 #if defined(TARGET_PPC64)
698 if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
699 gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
700 0, crfD(ctx->opcode));
701 else
702 #endif
703 gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
704 0, crfD(ctx->opcode));
705 }
706
707 /* cmpli */
708 static void gen_cmpli(DisasContext *ctx)
709 {
710 #if defined(TARGET_PPC64)
711 if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
712 gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
713 0, crfD(ctx->opcode));
714 else
715 #endif
716 gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
717 0, crfD(ctx->opcode));
718 }
719
720 /* isel (PowerPC 2.03 specification) */
721 static void gen_isel(DisasContext *ctx)
722 {
723 int l1, l2;
724 uint32_t bi = rC(ctx->opcode);
725 uint32_t mask;
726 TCGv_i32 t0;
727
728 l1 = gen_new_label();
729 l2 = gen_new_label();
730
731 mask = 1 << (3 - (bi & 0x03));
732 t0 = tcg_temp_new_i32();
733 tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
734 tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
735 if (rA(ctx->opcode) == 0)
736 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
737 else
738 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
739 tcg_gen_br(l2);
740 gen_set_label(l1);
741 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
742 gen_set_label(l2);
743 tcg_temp_free_i32(t0);
744 }
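/* A minimal illustration (not part of the original source, helper name is
 * hypothetical): the functional behaviour of isel — if the selected CR bit
 * is set, the result is GPR[rA] (or literal 0 when rA is 0), else GPR[rB]. */
#if 0
#include <stdint.h>

static uint32_t isel_model(int cr_bit_set, unsigned ra_index,
                           uint32_t ra_val, uint32_t rb_val)
{
    if (cr_bit_set) {
        return ra_index == 0 ? 0 : ra_val;
    }
    return rb_val;
}
#endif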
745
746 /*** Integer arithmetic ***/
747
748 static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
749 TCGv arg1, TCGv arg2, int sub)
750 {
751 int l1;
752 TCGv t0;
753
754 l1 = gen_new_label();
755 /* Start with XER OV disabled, the most likely case */
756 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
757 t0 = tcg_temp_local_new();
758 tcg_gen_xor_tl(t0, arg0, arg1);
759 #if defined(TARGET_PPC64)
760 if (!ctx->sf_mode)
761 tcg_gen_ext32s_tl(t0, t0);
762 #endif
763 if (sub)
764 tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
765 else
766 tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
767 tcg_gen_xor_tl(t0, arg1, arg2);
768 #if defined(TARGET_PPC64)
769 if (!ctx->sf_mode)
770 tcg_gen_ext32s_tl(t0, t0);
771 #endif
772 if (sub)
773 tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
774 else
775 tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
776 tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
777 gen_set_label(l1);
778 tcg_temp_free(t0);
779 }
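/* A minimal illustration (not part of the original source): the two
 * XOR/sign-bit branches above implement the classic signed-overflow
 * predicates.  Here arg0 is the result and arg1/arg2 the operands; for
 * subf the result is arg2 - arg1. */
#if 0
#include <stdint.h>

static int add_overflows(int32_t res, int32_t a, int32_t b)
{
    /* operands agree in sign, result disagrees */
    return ((res ^ a) & ~(a ^ b)) < 0;
}

static int subf_overflows(int32_t res, int32_t a, int32_t b) /* res = b - a */
{
    /* operands disagree in sign, result follows the subtrahend a */
    return (~(res ^ a) & (a ^ b)) < 0;
}
#endif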
780
781 static inline void gen_op_arith_compute_ca(DisasContext *ctx, TCGv arg1,
782 TCGv arg2, int sub)
783 {
784 int l1 = gen_new_label();
785
786 #if defined(TARGET_PPC64)
787 if (!(ctx->sf_mode)) {
788 TCGv t0, t1;
789 t0 = tcg_temp_new();
790 t1 = tcg_temp_new();
791
792 tcg_gen_ext32u_tl(t0, arg1);
793 tcg_gen_ext32u_tl(t1, arg2);
794 if (sub) {
795 tcg_gen_brcond_tl(TCG_COND_GTU, t0, t1, l1);
796 } else {
797 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
798 }
799 tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
800 gen_set_label(l1);
801 tcg_temp_free(t0);
802 tcg_temp_free(t1);
803 } else
804 #endif
805 {
806 if (sub) {
807 tcg_gen_brcond_tl(TCG_COND_GTU, arg1, arg2, l1);
808 } else {
809 tcg_gen_brcond_tl(TCG_COND_GEU, arg1, arg2, l1);
810 }
811 tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
812 gen_set_label(l1);
813 }
814 }
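/* A minimal illustration (not part of the original source): the unsigned
 * compare-and-branch sequences above reduce to these carry predicates.
 * For addition the caller passes the sum and one operand; for subf,
 * XER[CA] means "no borrow occurred". */
#if 0
#include <stdint.h>

static int add_carries(uint32_t sum, uint32_t operand)   /* sum = a + b */
{
    return sum < operand;   /* unsigned wrap-around => carry out */
}

static int subf_carry(uint32_t res, uint32_t b)          /* res = b - a */
{
    return res <= b;        /* no borrow => XER[CA] = 1 */
}
#endif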
815
816 /* Common add function */
817 static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
818 TCGv arg2, int add_ca, int compute_ca,
819 int compute_ov)
820 {
821 TCGv t0, t1;
822
823 if ((!compute_ca && !compute_ov) ||
824 (!TCGV_EQUAL(ret,arg1) && !TCGV_EQUAL(ret, arg2))) {
825 t0 = ret;
826 } else {
827 t0 = tcg_temp_local_new();
828 }
829
830 if (add_ca) {
831 t1 = tcg_temp_local_new();
832 tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
833 tcg_gen_shri_tl(t1, t1, XER_CA);
834 } else {
835 TCGV_UNUSED(t1);
836 }
837
838 if (compute_ca && compute_ov) {
839 /* Start with XER CA and OV disabled, the most likely case */
840 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
841 } else if (compute_ca) {
842 /* Start with XER CA disabled, the most likely case */
843 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
844 } else if (compute_ov) {
845 /* Start with XER OV disabled, the most likely case */
846 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
847 }
848
849 tcg_gen_add_tl(t0, arg1, arg2);
850
851 if (compute_ca) {
852 gen_op_arith_compute_ca(ctx, t0, arg1, 0);
853 }
854 if (add_ca) {
855 tcg_gen_add_tl(t0, t0, t1);
856 gen_op_arith_compute_ca(ctx, t0, t1, 0);
857 tcg_temp_free(t1);
858 }
859 if (compute_ov) {
860 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
861 }
862
863 if (unlikely(Rc(ctx->opcode) != 0))
864 gen_set_Rc0(ctx, t0);
865
866 if (!TCGV_EQUAL(t0, ret)) {
867 tcg_gen_mov_tl(ret, t0);
868 tcg_temp_free(t0);
869 }
870 }
871 /* Add functions with two operands */
872 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
873 static void glue(gen_, name)(DisasContext *ctx) \
874 { \
875 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
876 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
877 add_ca, compute_ca, compute_ov); \
878 }
879 /* Add functions with one operand and one immediate */
880 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
881 add_ca, compute_ca, compute_ov) \
882 static void glue(gen_, name)(DisasContext *ctx) \
883 { \
884 TCGv t0 = tcg_const_local_tl(const_val); \
885 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
886 cpu_gpr[rA(ctx->opcode)], t0, \
887 add_ca, compute_ca, compute_ov); \
888 tcg_temp_free(t0); \
889 }
890
891 /* add add. addo addo. */
892 GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
893 GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
894 /* addc addc. addco addco. */
895 GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
896 GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
897 /* adde adde. addeo addeo. */
898 GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
899 GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
900 /* addme addme. addmeo addmeo. */
901 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
902 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
903 /* addze addze. addzeo addzeo. */
904 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
905 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
906 /* addi */
907 static void gen_addi(DisasContext *ctx)
908 {
909 target_long simm = SIMM(ctx->opcode);
910
911 if (rA(ctx->opcode) == 0) {
912 /* li case */
913 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
914 } else {
915 tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm);
916 }
917 }
918 /* addic addic. */
919 static inline void gen_op_addic(DisasContext *ctx, TCGv ret, TCGv arg1,
920 int compute_Rc0)
921 {
922 target_long simm = SIMM(ctx->opcode);
923
924 /* Start with XER CA disabled, the most likely case */
925 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
926
927 if (likely(simm != 0)) {
928 TCGv t0 = tcg_temp_local_new();
929 tcg_gen_addi_tl(t0, arg1, simm);
930 gen_op_arith_compute_ca(ctx, t0, arg1, 0);
931 tcg_gen_mov_tl(ret, t0);
932 tcg_temp_free(t0);
933 } else {
934 tcg_gen_mov_tl(ret, arg1);
935 }
936 if (compute_Rc0) {
937 gen_set_Rc0(ctx, ret);
938 }
939 }
940
941 static void gen_addic(DisasContext *ctx)
942 {
943 gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
944 }
945
946 static void gen_addic_(DisasContext *ctx)
947 {
948 gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
949 }
950
951 /* addis */
952 static void gen_addis(DisasContext *ctx)
953 {
954 target_long simm = SIMM(ctx->opcode);
955
956 if (rA(ctx->opcode) == 0) {
957 /* lis case */
958 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
959 } else {
960 tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16);
961 }
962 }
963
964 static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
965 TCGv arg2, int sign, int compute_ov)
966 {
967 int l1 = gen_new_label();
968 int l2 = gen_new_label();
969 TCGv_i32 t0 = tcg_temp_local_new_i32();
970 TCGv_i32 t1 = tcg_temp_local_new_i32();
971
972 tcg_gen_trunc_tl_i32(t0, arg1);
973 tcg_gen_trunc_tl_i32(t1, arg2);
974 tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
975 if (sign) {
976 int l3 = gen_new_label();
977 tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
978 tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
979 gen_set_label(l3);
980 tcg_gen_div_i32(t0, t0, t1);
981 } else {
982 tcg_gen_divu_i32(t0, t0, t1);
983 }
984 if (compute_ov) {
985 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
986 }
987 tcg_gen_br(l2);
988 gen_set_label(l1);
989 if (sign) {
990 tcg_gen_sari_i32(t0, t0, 31);
991 } else {
992 tcg_gen_movi_i32(t0, 0);
993 }
994 if (compute_ov) {
995 tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
996 }
997 gen_set_label(l2);
998 tcg_gen_extu_i32_tl(ret, t0);
999 tcg_temp_free_i32(t0);
1000 tcg_temp_free_i32(t1);
1001 if (unlikely(Rc(ctx->opcode) != 0))
1002 gen_set_Rc0(ctx, ret);
1003 }
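/* A minimal illustration (not part of the original source): the l1 path
 * above covers the two divides the architecture leaves undefined; this
 * translation picks sign replication of the dividend (signed) or zero
 * (unsigned), setting XER[OV]|XER[SO] when requested. */
#if 0
#include <stdint.h>

static int32_t divw_model(int32_t a, int32_t b, int *ov)
{
    if (b == 0 || (a == INT32_MIN && b == -1)) {
        *ov = 1;
        return a < 0 ? -1 : 0;   /* like tcg_gen_sari_i32(t0, t0, 31) */
    }
    *ov = 0;
    return a / b;
}
#endif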
1004 /* Div functions */
1005 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
1006 static void glue(gen_, name)(DisasContext *ctx) \
1007 { \
1008 gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
1009 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1010 sign, compute_ov); \
1011 }
1012 /* divwu divwu. divwuo divwuo. */
1013 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
1014 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
1015 /* divw divw. divwo divwo. */
1016 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
1017 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
1018 #if defined(TARGET_PPC64)
1019 static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
1020 TCGv arg2, int sign, int compute_ov)
1021 {
1022 int l1 = gen_new_label();
1023 int l2 = gen_new_label();
1024
1025 tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
1026 if (sign) {
1027 int l3 = gen_new_label();
1028 tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
1029 tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
1030 gen_set_label(l3);
1031 tcg_gen_div_i64(ret, arg1, arg2);
1032 } else {
1033 tcg_gen_divu_i64(ret, arg1, arg2);
1034 }
1035 if (compute_ov) {
1036 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
1037 }
1038 tcg_gen_br(l2);
1039 gen_set_label(l1);
1040 if (sign) {
1041 tcg_gen_sari_i64(ret, arg1, 63);
1042 } else {
1043 tcg_gen_movi_i64(ret, 0);
1044 }
1045 if (compute_ov) {
1046 tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
1047 }
1048 gen_set_label(l2);
1049 if (unlikely(Rc(ctx->opcode) != 0))
1050 gen_set_Rc0(ctx, ret);
1051 }
1052 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
1053 static void glue(gen_, name)(DisasContext *ctx) \
1054 { \
1055 gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
1056 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1057 sign, compute_ov); \
1058 }
1059 /* divdu divdu. divduo divduo. */
1060 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
1061 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
1062 /* divd divd. divdo divdo. */
1063 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
1064 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
1065 #endif
1066
1067 /* mulhw mulhw. */
1068 static void gen_mulhw(DisasContext *ctx)
1069 {
1070 TCGv_i64 t0, t1;
1071
1072 t0 = tcg_temp_new_i64();
1073 t1 = tcg_temp_new_i64();
1074 #if defined(TARGET_PPC64)
1075 tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
1076 tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
1077 tcg_gen_mul_i64(t0, t0, t1);
1078 tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
1079 #else
1080 tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
1081 tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
1082 tcg_gen_mul_i64(t0, t0, t1);
1083 tcg_gen_shri_i64(t0, t0, 32);
1084 tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
1085 #endif
1086 tcg_temp_free_i64(t0);
1087 tcg_temp_free_i64(t1);
1088 if (unlikely(Rc(ctx->opcode) != 0))
1089 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1090 }
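/* A minimal illustration (not part of the original source): both the
 * 64-bit-target and 32-bit-target paths above compute the same thing, the
 * high 32 bits of the 64-bit signed product. */
#if 0
#include <stdint.h>

static int32_t mulhw_model(int32_t a, int32_t b)
{
    return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
}
#endif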
1091
1092 /* mulhwu mulhwu. */
1093 static void gen_mulhwu(DisasContext *ctx)
1094 {
1095 TCGv_i64 t0, t1;
1096
1097 t0 = tcg_temp_new_i64();
1098 t1 = tcg_temp_new_i64();
1099 #if defined(TARGET_PPC64)
1100 tcg_gen_ext32u_i64(t0, cpu_gpr[rA(ctx->opcode)]);
1101 tcg_gen_ext32u_i64(t1, cpu_gpr[rB(ctx->opcode)]);
1102 tcg_gen_mul_i64(t0, t0, t1);
1103 tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
1104 #else
1105 tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
1106 tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
1107 tcg_gen_mul_i64(t0, t0, t1);
1108 tcg_gen_shri_i64(t0, t0, 32);
1109 tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
1110 #endif
1111 tcg_temp_free_i64(t0);
1112 tcg_temp_free_i64(t1);
1113 if (unlikely(Rc(ctx->opcode) != 0))
1114 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1115 }
1116
1117 /* mullw mullw. */
1118 static void gen_mullw(DisasContext *ctx)
1119 {
1120 tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1121 cpu_gpr[rB(ctx->opcode)]);
1122 tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
1123 if (unlikely(Rc(ctx->opcode) != 0))
1124 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1125 }
1126
1127 /* mullwo mullwo. */
1128 static void gen_mullwo(DisasContext *ctx)
1129 {
1130 int l1;
1131 TCGv_i64 t0, t1;
1132
1133 t0 = tcg_temp_new_i64();
1134 t1 = tcg_temp_new_i64();
1135 l1 = gen_new_label();
1136 /* Start with XER OV disabled, the most likely case */
1137 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
1138 #if defined(TARGET_PPC64)
1139 tcg_gen_ext32s_i64(t0, cpu_gpr[rA(ctx->opcode)]);
1140 tcg_gen_ext32s_i64(t1, cpu_gpr[rB(ctx->opcode)]);
1141 #else
1142 tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
1143 tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
1144 #endif
1145 tcg_gen_mul_i64(t0, t0, t1);
1146 #if defined(TARGET_PPC64)
1147 tcg_gen_ext32s_i64(cpu_gpr[rD(ctx->opcode)], t0);
1148 tcg_gen_brcond_i64(TCG_COND_EQ, t0, cpu_gpr[rD(ctx->opcode)], l1);
1149 #else
1150 tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
1151 tcg_gen_ext32s_i64(t1, t0);
1152 tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
1153 #endif
1154 tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
1155 gen_set_label(l1);
1156 tcg_temp_free_i64(t0);
1157 tcg_temp_free_i64(t1);
1158 if (unlikely(Rc(ctx->opcode) != 0))
1159 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1160 }
1161
1162 /* mulli */
1163 static void gen_mulli(DisasContext *ctx)
1164 {
1165 tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1166 SIMM(ctx->opcode));
1167 }
1168 #if defined(TARGET_PPC64)
1169 #define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
1170 static void glue(gen_, name)(DisasContext *ctx) \
1171 { \
1172 gen_helper_##name (cpu_gpr[rD(ctx->opcode)], \
1173 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
1174 if (unlikely(Rc(ctx->opcode) != 0)) \
1175 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
1176 }
1177 /* mulhdu mulhdu. */
1178 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00);
1179 /* mulhd mulhd. */
1180 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02);
1181
1182 /* mulld mulld. */
1183 static void gen_mulld(DisasContext *ctx)
1184 {
1185 tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1186 cpu_gpr[rB(ctx->opcode)]);
1187 if (unlikely(Rc(ctx->opcode) != 0))
1188 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1189 }
1190 /* mulldo mulldo. */
1191 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17);
1192 #endif
1193
1194 /* neg neg. nego nego. */
1195 static inline void gen_op_arith_neg(DisasContext *ctx, TCGv ret, TCGv arg1,
1196 int ov_check)
1197 {
1198 int l1 = gen_new_label();
1199 int l2 = gen_new_label();
1200 TCGv t0 = tcg_temp_local_new();
1201 #if defined(TARGET_PPC64)
1202 if (ctx->sf_mode) {
1203 tcg_gen_mov_tl(t0, arg1);
1204 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT64_MIN, l1);
1205 } else
1206 #endif
1207 {
1208 tcg_gen_ext32s_tl(t0, arg1);
1209 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT32_MIN, l1);
1210 }
1211 tcg_gen_neg_tl(ret, arg1);
1212 if (ov_check) {
1213 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
1214 }
1215 tcg_gen_br(l2);
1216 gen_set_label(l1);
1217 tcg_gen_mov_tl(ret, t0);
1218 if (ov_check) {
1219 tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
1220 }
1221 gen_set_label(l2);
1222 tcg_temp_free(t0);
1223 if (unlikely(Rc(ctx->opcode) != 0))
1224 gen_set_Rc0(ctx, ret);
1225 }
1226
1227 static void gen_neg(DisasContext *ctx)
1228 {
1229 gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
1230 }
1231
1232 static void gen_nego(DisasContext *ctx)
1233 {
1234 gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
1235 }
1236
1237 /* Common subf function */
1238 static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
1239 TCGv arg2, int add_ca, int compute_ca,
1240 int compute_ov)
1241 {
1242 TCGv t0, t1;
1243
1244 if ((!compute_ca && !compute_ov) ||
1245 (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) {
1246 t0 = ret;
1247 } else {
1248 t0 = tcg_temp_local_new();
1249 }
1250
1251 if (add_ca) {
1252 t1 = tcg_temp_local_new();
1253 tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
1254 tcg_gen_shri_tl(t1, t1, XER_CA);
1255 } else {
1256 TCGV_UNUSED(t1);
1257 }
1258
1259 if (compute_ca && compute_ov) {
1260 /* Start with XER CA and OV disabled, the most likely case */
1261 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
1262 } else if (compute_ca) {
1263 /* Start with XER CA disabled, the most likely case */
1264 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
1265 } else if (compute_ov) {
1266 /* Start with XER OV disabled, the most likely case */
1267 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
1268 }
1269
1270 if (add_ca) {
1271 tcg_gen_not_tl(t0, arg1);
1272 tcg_gen_add_tl(t0, t0, arg2);
1273 gen_op_arith_compute_ca(ctx, t0, arg2, 0);
1274 tcg_gen_add_tl(t0, t0, t1);
1275 gen_op_arith_compute_ca(ctx, t0, t1, 0);
1276 tcg_temp_free(t1);
1277 } else {
1278 tcg_gen_sub_tl(t0, arg2, arg1);
1279 if (compute_ca) {
1280 gen_op_arith_compute_ca(ctx, t0, arg2, 1);
1281 }
1282 }
1283 if (compute_ov) {
1284 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
1285 }
1286
1287 if (unlikely(Rc(ctx->opcode) != 0))
1288 gen_set_Rc0(ctx, t0);
1289
1290 if (!TCGV_EQUAL(t0, ret)) {
1291 tcg_gen_mov_tl(ret, t0);
1292 tcg_temp_free(t0);
1293 }
1294 }
1295 /* Sub functions with two operands */
1296 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
1297 static void glue(gen_, name)(DisasContext *ctx) \
1298 { \
1299 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
1300 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
1301 add_ca, compute_ca, compute_ov); \
1302 }
1303 /* Sub functions with one operand and one immediate */
1304 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
1305 add_ca, compute_ca, compute_ov) \
1306 static void glue(gen_, name)(DisasContext *ctx) \
1307 { \
1308 TCGv t0 = tcg_const_local_tl(const_val); \
1309 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
1310 cpu_gpr[rA(ctx->opcode)], t0, \
1311 add_ca, compute_ca, compute_ov); \
1312 tcg_temp_free(t0); \
1313 }
1314 /* subf subf. subfo subfo. */
1315 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
1316 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
1317 /* subfc subfc. subfco subfco. */
1318 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
1319 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
1320 /* subfe subfe. subfeo subfeo. */
1321 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
1322 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
1323 /* subfme subfme. subfmeo subfmeo. */
1324 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
1325 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
1326 /* subfze subfze. subfzeo subfzeo. */
1327 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
1328 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
1329
1330 /* subfic */
1331 static void gen_subfic(DisasContext *ctx)
1332 {
1333 /* Start with XER CA disabled, the most likely case */
1334 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
1335 TCGv t0 = tcg_temp_local_new();
1336 TCGv t1 = tcg_const_local_tl(SIMM(ctx->opcode));
1337 tcg_gen_sub_tl(t0, t1, cpu_gpr[rA(ctx->opcode)]);
1338 gen_op_arith_compute_ca(ctx, t0, t1, 1);
1339 tcg_temp_free(t1);
1340 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
1341 tcg_temp_free(t0);
1342 }
1343
1344 /*** Integer logical ***/
1345 #define GEN_LOGICAL2(name, tcg_op, opc, type) \
1346 static void glue(gen_, name)(DisasContext *ctx) \
1347 { \
1348 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
1349 cpu_gpr[rB(ctx->opcode)]); \
1350 if (unlikely(Rc(ctx->opcode) != 0)) \
1351 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1352 }
1353
1354 #define GEN_LOGICAL1(name, tcg_op, opc, type) \
1355 static void glue(gen_, name)(DisasContext *ctx) \
1356 { \
1357 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
1358 if (unlikely(Rc(ctx->opcode) != 0)) \
1359 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1360 }
1361
1362 /* and & and. */
1363 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
1364 /* andc & andc. */
1365 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
1366
1367 /* andi. */
1368 static void gen_andi_(DisasContext *ctx)
1369 {
1370 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
1371 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1372 }
1373
1374 /* andis. */
1375 static void gen_andis_(DisasContext *ctx)
1376 {
1377 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
1378 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1379 }
1380
1381 /* cntlzw */
1382 static void gen_cntlzw(DisasContext *ctx)
1383 {
1384 gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1385 if (unlikely(Rc(ctx->opcode) != 0))
1386 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1387 }
1388 /* eqv & eqv. */
1389 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
1390 /* extsb & extsb. */
1391 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
1392 /* extsh & extsh. */
1393 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
1394 /* nand & nand. */
1395 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
1396 /* nor & nor. */
1397 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
1398
1399 /* or & or. */
1400 static void gen_or(DisasContext *ctx)
1401 {
1402 int rs, ra, rb;
1403
1404 rs = rS(ctx->opcode);
1405 ra = rA(ctx->opcode);
1406 rb = rB(ctx->opcode);
1407 /* Optimisation for the mr and mr. cases (or rx,ry,ry) */
1408 if (rs != ra || rs != rb) {
1409 if (rs != rb)
1410 tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
1411 else
1412 tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
1413 if (unlikely(Rc(ctx->opcode) != 0))
1414 gen_set_Rc0(ctx, cpu_gpr[ra]);
1415 } else if (unlikely(Rc(ctx->opcode) != 0)) {
1416 gen_set_Rc0(ctx, cpu_gpr[rs]);
1417 #if defined(TARGET_PPC64)
1418 } else {
1419 int prio = 0;
1420
1421 switch (rs) {
1422 case 1:
1423 /* Set process priority to low */
1424 prio = 2;
1425 break;
1426 case 6:
1427 /* Set process priority to medium-low */
1428 prio = 3;
1429 break;
1430 case 2:
1431 /* Set process priority to normal */
1432 prio = 4;
1433 break;
1434 #if !defined(CONFIG_USER_ONLY)
1435 case 31:
1436 if (ctx->mem_idx > 0) {
1437 /* Set process priority to very low */
1438 prio = 1;
1439 }
1440 break;
1441 case 5:
1442 if (ctx->mem_idx > 0) {
1443 /* Set process priority to medium-high */
1444 prio = 5;
1445 }
1446 break;
1447 case 3:
1448 if (ctx->mem_idx > 0) {
1449 /* Set process priority to high */
1450 prio = 6;
1451 }
1452 break;
1453 case 7:
1454 if (ctx->mem_idx > 1) {
1455 /* Set process priority to very high */
1456 prio = 7;
1457 }
1458 break;
1459 #endif
1460 default:
1461 /* nop */
1462 break;
1463 }
1464 if (prio) {
1465 TCGv t0 = tcg_temp_new();
1466 gen_load_spr(t0, SPR_PPR);
1467 tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
1468 tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
1469 gen_store_spr(SPR_PPR, t0);
1470 tcg_temp_free(t0);
1471 }
1472 #endif
1473 }
1474 }
1475 /* orc & orc. */
1476 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
1477
1478 /* xor & xor. */
1479 static void gen_xor(DisasContext *ctx)
1480 {
1481 /* Optimisation for "set to zero" case */
1482 if (rS(ctx->opcode) != rB(ctx->opcode))
1483 tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1484 else
1485 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
1486 if (unlikely(Rc(ctx->opcode) != 0))
1487 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1488 }
1489
1490 /* ori */
1491 static void gen_ori(DisasContext *ctx)
1492 {
1493 target_ulong uimm = UIMM(ctx->opcode);
1494
1495 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1496 /* NOP */
1497 /* XXX: should handle special NOPs for POWER series */
1498 return;
1499 }
1500 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1501 }
1502
1503 /* oris */
1504 static void gen_oris(DisasContext *ctx)
1505 {
1506 target_ulong uimm = UIMM(ctx->opcode);
1507
1508 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1509 /* NOP */
1510 return;
1511 }
1512 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
1513 }
1514
1515 /* xori */
1516 static void gen_xori(DisasContext *ctx)
1517 {
1518 target_ulong uimm = UIMM(ctx->opcode);
1519
1520 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1521 /* NOP */
1522 return;
1523 }
1524 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1525 }
1526
1527 /* xoris */
1528 static void gen_xoris(DisasContext *ctx)
1529 {
1530 target_ulong uimm = UIMM(ctx->opcode);
1531
1532 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1533 /* NOP */
1534 return;
1535 }
1536 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
1537 }
1538
1539 /* popcntb: PowerPC 2.03 specification */
1540 static void gen_popcntb(DisasContext *ctx)
1541 {
1542 gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1543 }
1544
1545 static void gen_popcntw(DisasContext *ctx)
1546 {
1547 gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1548 }
1549
1550 #if defined(TARGET_PPC64)
1551 /* popcntd: PowerPC 2.06 specification */
1552 static void gen_popcntd(DisasContext *ctx)
1553 {
1554 gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1555 }
1556 #endif
1557
1558 #if defined(TARGET_PPC64)
1559 /* extsw & extsw. */
1560 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
1561
1562 /* cntlzd */
1563 static void gen_cntlzd(DisasContext *ctx)
1564 {
1565 gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1566 if (unlikely(Rc(ctx->opcode) != 0))
1567 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1568 }
1569 #endif
1570
1571 /*** Integer rotate ***/
1572
1573 /* rlwimi & rlwimi. */
1574 static void gen_rlwimi(DisasContext *ctx)
1575 {
1576 uint32_t mb, me, sh;
1577
1578 mb = MB(ctx->opcode);
1579 me = ME(ctx->opcode);
1580 sh = SH(ctx->opcode);
1581 if (likely(sh == 0 && mb == 0 && me == 31)) {
1582 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1583 } else {
1584 target_ulong mask;
1585 TCGv t1;
1586 TCGv t0 = tcg_temp_new();
1587 #if defined(TARGET_PPC64)
1588 TCGv_i32 t2 = tcg_temp_new_i32();
1589 tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
1590 tcg_gen_rotli_i32(t2, t2, sh);
1591 tcg_gen_extu_i32_i64(t0, t2);
1592 tcg_temp_free_i32(t2);
1593 #else
1594 tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
1595 #endif
1596 #if defined(TARGET_PPC64)
1597 mb += 32;
1598 me += 32;
1599 #endif
1600 mask = MASK(mb, me);
1601 t1 = tcg_temp_new();
1602 tcg_gen_andi_tl(t0, t0, mask);
1603 tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
1604 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1605 tcg_temp_free(t0);
1606 tcg_temp_free(t1);
1607 }
1608 if (unlikely(Rc(ctx->opcode) != 0))
1609 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1610 }
1611
1612 /* rlwinm & rlwinm. */
1613 static void gen_rlwinm(DisasContext *ctx)
1614 {
1615 uint32_t mb, me, sh;
1616
1617 sh = SH(ctx->opcode);
1618 mb = MB(ctx->opcode);
1619 me = ME(ctx->opcode);
1620
1621 if (likely(mb == 0 && me == (31 - sh))) {
1622 if (likely(sh == 0)) {
1623 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1624 } else {
1625 TCGv t0 = tcg_temp_new();
1626 tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
1627 tcg_gen_shli_tl(t0, t0, sh);
1628 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
1629 tcg_temp_free(t0);
1630 }
1631 } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
1632 TCGv t0 = tcg_temp_new();
1633 tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
1634 tcg_gen_shri_tl(t0, t0, mb);
1635 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
1636 tcg_temp_free(t0);
1637 } else {
1638 TCGv t0 = tcg_temp_new();
1639 #if defined(TARGET_PPC64)
1640 TCGv_i32 t1 = tcg_temp_new_i32();
1641 tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
1642 tcg_gen_rotli_i32(t1, t1, sh);
1643 tcg_gen_extu_i32_i64(t0, t1);
1644 tcg_temp_free_i32(t1);
1645 #else
1646 tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
1647 #endif
1648 #if defined(TARGET_PPC64)
1649 mb += 32;
1650 me += 32;
1651 #endif
1652 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
1653 tcg_temp_free(t0);
1654 }
1655 if (unlikely(Rc(ctx->opcode) != 0))
1656 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1657 }
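/* A minimal illustration (not part of the original source): the general
 * rlwinm case is rotate-left-32 then AND with MASK(mb, me); the fast paths
 * above are just the shifts this degenerates to when the mask lines up
 * with the shift count. */
#if 0
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned sh)
{
    return sh ? (x << sh) | (x >> (32 - sh)) : x;
}

static uint32_t rlwinm_model(uint32_t rs, unsigned sh, uint32_t mask)
{
    return rotl32(rs, sh) & mask;
}
#endif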
1658
1659 /* rlwnm & rlwnm. */
1660 static void gen_rlwnm(DisasContext *ctx)
1661 {
1662 uint32_t mb, me;
1663 TCGv t0;
1664 #if defined(TARGET_PPC64)
1665 TCGv_i32 t1, t2;
1666 #endif
1667
1668 mb = MB(ctx->opcode);
1669 me = ME(ctx->opcode);
1670 t0 = tcg_temp_new();
1671 tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
1672 #if defined(TARGET_PPC64)
1673 t1 = tcg_temp_new_i32();
1674 t2 = tcg_temp_new_i32();
1675 tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
1676 tcg_gen_trunc_i64_i32(t2, t0);
1677 tcg_gen_rotl_i32(t1, t1, t2);
1678 tcg_gen_extu_i32_i64(t0, t1);
1679 tcg_temp_free_i32(t1);
1680 tcg_temp_free_i32(t2);
1681 #else
1682 tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
1683 #endif
1684 if (unlikely(mb != 0 || me != 31)) {
1685 #if defined(TARGET_PPC64)
1686 mb += 32;
1687 me += 32;
1688 #endif
1689 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
1690 } else {
1691 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
1692 }
1693 tcg_temp_free(t0);
1694 if (unlikely(Rc(ctx->opcode) != 0))
1695 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1696 }
1697
1698 #if defined(TARGET_PPC64)
1699 #define GEN_PPC64_R2(name, opc1, opc2) \
1700 static void glue(gen_, name##0)(DisasContext *ctx) \
1701 { \
1702 gen_##name(ctx, 0); \
1703 } \
1704 \
1705 static void glue(gen_, name##1)(DisasContext *ctx) \
1706 { \
1707 gen_##name(ctx, 1); \
1708 }
1709 #define GEN_PPC64_R4(name, opc1, opc2) \
1710 static void glue(gen_, name##0)(DisasContext *ctx) \
1711 { \
1712 gen_##name(ctx, 0, 0); \
1713 } \
1714 \
1715 static void glue(gen_, name##1)(DisasContext *ctx) \
1716 { \
1717 gen_##name(ctx, 0, 1); \
1718 } \
1719 \
1720 static void glue(gen_, name##2)(DisasContext *ctx) \
1721 { \
1722 gen_##name(ctx, 1, 0); \
1723 } \
1724 \
1725 static void glue(gen_, name##3)(DisasContext *ctx) \
1726 { \
1727 gen_##name(ctx, 1, 1); \
1728 }
1729
1730 static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
1731 uint32_t sh)
1732 {
1733 if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
1734 tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
1735 } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
1736 tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
1737 } else {
1738 TCGv t0 = tcg_temp_new();
1739 tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
1740 if (likely(mb == 0 && me == 63)) {
1741 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
1742 } else {
1743 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
1744 }
1745 tcg_temp_free(t0);
1746 }
1747 if (unlikely(Rc(ctx->opcode) != 0))
1748 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1749 }
1750 /* rldicl - rldicl. */
1751 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
1752 {
1753 uint32_t sh, mb;
1754
1755 sh = SH(ctx->opcode) | (shn << 5);
1756 mb = MB(ctx->opcode) | (mbn << 5);
1757 gen_rldinm(ctx, mb, 63, sh);
1758 }
1759 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
1760 /* rldicr - rldicr. */
1761 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
1762 {
1763 uint32_t sh, me;
1764
1765 sh = SH(ctx->opcode) | (shn << 5);
1766 me = MB(ctx->opcode) | (men << 5);
1767 gen_rldinm(ctx, 0, me, sh);
1768 }
1769 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
1770 /* rldic - rldic. */
1771 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
1772 {
1773 uint32_t sh, mb;
1774
1775 sh = SH(ctx->opcode) | (shn << 5);
1776 mb = MB(ctx->opcode) | (mbn << 5);
1777 gen_rldinm(ctx, mb, 63 - sh, sh);
1778 }
1779 GEN_PPC64_R4(rldic, 0x1E, 0x04);
1780
1781 static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
1782 {
1783 TCGv t0;
1784
1785 /* mb and me are supplied by the callers (gen_rldcl/gen_rldcr) and
1786    already include the extension bit; do not re-extract them here. */
1787 t0 = tcg_temp_new();
1788 tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
1789 tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1790 if (unlikely(mb != 0 || me != 63)) {
1791 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
1792 } else {
1793 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
1794 }
1795 tcg_temp_free(t0);
1796 if (unlikely(Rc(ctx->opcode) != 0))
1797 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1798 }
1799
1800 /* rldcl - rldcl. */
1801 static inline void gen_rldcl(DisasContext *ctx, int mbn)
1802 {
1803 uint32_t mb;
1804
1805 mb = MB(ctx->opcode) | (mbn << 5);
1806 gen_rldnm(ctx, mb, 63);
1807 }
1808 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
1809 /* rldcr - rldcr. */
1810 static inline void gen_rldcr(DisasContext *ctx, int men)
1811 {
1812 uint32_t me;
1813
1814 me = MB(ctx->opcode) | (men << 5);
1815 gen_rldnm(ctx, 0, me);
1816 }
1817 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
1818 /* rldimi - rldimi. */
1819 static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
1820 {
1821 uint32_t sh, mb, me;
1822
1823 sh = SH(ctx->opcode) | (shn << 5);
1824 mb = MB(ctx->opcode) | (mbn << 5);
1825 me = 63 - sh;
1826 if (unlikely(sh == 0 && mb == 0)) {
1827 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1828 } else {
1829 TCGv t0, t1;
1830 target_ulong mask;
1831
1832 t0 = tcg_temp_new();
1833 tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
1834 t1 = tcg_temp_new();
1835 mask = MASK(mb, me);
1836 tcg_gen_andi_tl(t0, t0, mask);
1837 tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
1838 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1839 tcg_temp_free(t0);
1840 tcg_temp_free(t1);
1841 }
1842 if (unlikely(Rc(ctx->opcode) != 0))
1843 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1844 }
1845 GEN_PPC64_R4(rldimi, 0x1E, 0x06);
1846 #endif
1847
1848 /*** Integer shift ***/
1849
1850 /* slw & slw. */
1851 static void gen_slw(DisasContext *ctx)
1852 {
1853 TCGv t0, t1;
1854
1855 t0 = tcg_temp_new();
1856 /* AND rS with a mask that is 0 when rB >= 0x20 */
1857 #if defined(TARGET_PPC64)
1858 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
1859 tcg_gen_sari_tl(t0, t0, 0x3f);
1860 #else
1861 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
1862 tcg_gen_sari_tl(t0, t0, 0x1f);
1863 #endif
1864 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1865 t1 = tcg_temp_new();
1866 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
1867 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1868 tcg_temp_free(t1);
1869 tcg_temp_free(t0);
1870 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
1871 if (unlikely(Rc(ctx->opcode) != 0))
1872 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1873 }
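/* A minimal illustration (not part of the original source): the shli/sari
 * pair above replicates bit 5 of rB across the word, so shift amounts of
 * 32..63 force a zero result; the actual shift then uses only rB & 0x1f. */
#if 0
#include <stdint.h>

static uint32_t slw_model(uint32_t rs, uint32_t rb)
{
    uint32_t kill = (rb & 0x20) ? 0xFFFFFFFFu : 0;  /* sari replication */
    return (rs & ~kill) << (rb & 0x1F);
}
#endif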
1874
1875 /* sraw & sraw. */
1876 static void gen_sraw(DisasContext *ctx)
1877 {
1878 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)],
1879 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1880 if (unlikely(Rc(ctx->opcode) != 0))
1881 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1882 }
1883
1884 /* srawi & srawi. */
1885 static void gen_srawi(DisasContext *ctx)
1886 {
1887 int sh = SH(ctx->opcode);
1888 if (sh != 0) {
1889 int l1, l2;
1890 TCGv t0;
1891 l1 = gen_new_label();
1892 l2 = gen_new_label();
1893 t0 = tcg_temp_local_new();
1894 tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
1895 tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
1896 tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
1897 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
1898 tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
1899 tcg_gen_br(l2);
1900 gen_set_label(l1);
1901 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
1902 gen_set_label(l2);
1903 tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
1904 tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], t0, sh);
1905 tcg_temp_free(t0);
1906 } else {
1907 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1908 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
1909 }
1910 if (unlikely(Rc(ctx->opcode) != 0))
1911 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1912 }
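/* A minimal illustration (not part of the original source): srawi sets
 * XER[CA] only when the source is negative and at least one 1-bit is
 * shifted out (i.e. the shift rounded toward minus infinity), which is
 * what the two branches above test. */
#if 0
#include <stdint.h>

static int32_t srawi_model(int32_t rs, unsigned sh, int *ca)
{
    *ca = (rs < 0) && ((uint32_t)rs & ((1u << sh) - 1)) != 0;
    return rs >> sh;   /* arithmetic shift, like tcg_gen_sari_tl */
}
#endif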
1913
1914 /* srw & srw. */
1915 static void gen_srw(DisasContext *ctx)
1916 {
1917 TCGv t0, t1;
1918
1919 t0 = tcg_temp_new();
1920 /* AND rS with a mask that is 0 when rB >= 0x20 */
1921 #if defined(TARGET_PPC64)
1922 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
1923 tcg_gen_sari_tl(t0, t0, 0x3f);
1924 #else
1925 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
1926 tcg_gen_sari_tl(t0, t0, 0x1f);
1927 #endif
1928 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1929 tcg_gen_ext32u_tl(t0, t0);
1930 t1 = tcg_temp_new();
1931 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
1932 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1933 tcg_temp_free(t1);
1934 tcg_temp_free(t0);
1935 if (unlikely(Rc(ctx->opcode) != 0))
1936 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1937 }
1938
1939 #if defined(TARGET_PPC64)
1940 /* sld & sld. */
1941 static void gen_sld(DisasContext *ctx)
1942 {
1943 TCGv t0, t1;
1944
1945 t0 = tcg_temp_new();
1946 /* AND rS with a mask that is 0 when rB >= 0x40 */
1947 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
1948 tcg_gen_sari_tl(t0, t0, 0x3f);
1949 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1950 t1 = tcg_temp_new();
1951 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
1952 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1953 tcg_temp_free(t1);
1954 tcg_temp_free(t0);
1955 if (unlikely(Rc(ctx->opcode) != 0))
1956 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1957 }
1958
1959 /* srad & srad. */
1960 static void gen_srad(DisasContext *ctx)
1961 {
1962 gen_helper_srad(cpu_gpr[rA(ctx->opcode)],
1963 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1964 if (unlikely(Rc(ctx->opcode) != 0))
1965 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1966 }
1967 /* sradi & sradi. */
1968 static inline void gen_sradi(DisasContext *ctx, int n)
1969 {
1970 int sh = SH(ctx->opcode) + (n << 5);
1971 if (sh != 0) {
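/* Same XER[CA] rule as srawi, applied to the full 64-bit register:
 * CA is set only for a negative source with non-zero bits shifted out. */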
1972 int l1, l2;
1973 TCGv t0;
1974 l1 = gen_new_label();
1975 l2 = gen_new_label();
1976 t0 = tcg_temp_local_new();
1977 tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
1978 tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
1979 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
1980 tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
1981 tcg_gen_br(l2);
1982 gen_set_label(l1);
1983 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
1984 gen_set_label(l2);
1985 tcg_temp_free(t0);
1986 tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
1987 } else {
1988 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1989 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
1990 }
1991 if (unlikely(Rc(ctx->opcode) != 0))
1992 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1993 }
1994
1995 static void gen_sradi0(DisasContext *ctx)
1996 {
1997 gen_sradi(ctx, 0);
1998 }
1999
2000 static void gen_sradi1(DisasContext *ctx)
2001 {
2002 gen_sradi(ctx, 1);
2003 }
2004
2005 /* srd & srd. */
2006 static void gen_srd(DisasContext *ctx)
2007 {
2008 TCGv t0, t1;
2009
2010 t0 = tcg_temp_new();
2011 /* AND rS with a mask that is 0 when rB >= 0x40 */
2012 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2013 tcg_gen_sari_tl(t0, t0, 0x3f);
2014 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2015 t1 = tcg_temp_new();
2016 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2017 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2018 tcg_temp_free(t1);
2019 tcg_temp_free(t0);
2020 if (unlikely(Rc(ctx->opcode) != 0))
2021 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2022 }
2023 #endif
2024
2025 /*** Floating-Point arithmetic ***/
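/* Generators for the A-form FP arithmetic instructions.  isfloat
 * selects the single-precision variant, which rounds the result with
 * frsp; set_fprf controls whether FPSCR[FPRF] is updated. */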
2026 #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
2027 static void gen_f##name(DisasContext *ctx) \
2028 { \
2029 if (unlikely(!ctx->fpu_enabled)) { \
2030 gen_exception(ctx, POWERPC_EXCP_FPU); \
2031 return; \
2032 } \
2033 /* NIP cannot be restored if the memory exception comes from a helper */ \
2034 gen_update_nip(ctx, ctx->nip - 4); \
2035 gen_reset_fpstatus(); \
2036 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
2037 cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2038 if (isfloat) { \
2039 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
2040 } \
2041 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
2042 Rc(ctx->opcode) != 0); \
2043 }
2044
2045 #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
2046 _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
2047 _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
2048
2049 #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2050 static void gen_f##name(DisasContext *ctx) \
2051 { \
2052 if (unlikely(!ctx->fpu_enabled)) { \
2053 gen_exception(ctx, POWERPC_EXCP_FPU); \
2054 return; \
2055 } \
2056 /* NIP cannot be restored if the memory exception comes from a helper */ \
2057 gen_update_nip(ctx, ctx->nip - 4); \
2058 gen_reset_fpstatus(); \
2059 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
2060 cpu_fpr[rB(ctx->opcode)]); \
2061 if (isfloat) { \
2062 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
2063 } \
2064 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2065 set_fprf, Rc(ctx->opcode) != 0); \
2066 }
2067 #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2068 _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2069 _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2070
2071 #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2072 static void gen_f##name(DisasContext *ctx) \
2073 { \
2074 if (unlikely(!ctx->fpu_enabled)) { \
2075 gen_exception(ctx, POWERPC_EXCP_FPU); \
2076 return; \
2077 } \
2078 /* NIP cannot be restored if the memory exception comes from a helper */ \
2079 gen_update_nip(ctx, ctx->nip - 4); \
2080 gen_reset_fpstatus(); \
2081 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
2082 cpu_fpr[rC(ctx->opcode)]); \
2083 if (isfloat) { \
2084 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
2085 } \
2086 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2087 set_fprf, Rc(ctx->opcode) != 0); \
2088 }
2089 #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2090 _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2091 _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2092
2093 #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2094 static void gen_f##name(DisasContext *ctx) \
2095 { \
2096 if (unlikely(!ctx->fpu_enabled)) { \
2097 gen_exception(ctx, POWERPC_EXCP_FPU); \
2098 return; \
2099 } \
2100 /* NIP cannot be restored if the memory exception comes from a helper */ \
2101 gen_update_nip(ctx, ctx->nip - 4); \
2102 gen_reset_fpstatus(); \
2103 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2104 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2105 set_fprf, Rc(ctx->opcode) != 0); \
2106 }
2107
2108 #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2109 static void gen_f##name(DisasContext *ctx) \
2110 { \
2111 if (unlikely(!ctx->fpu_enabled)) { \
2112 gen_exception(ctx, POWERPC_EXCP_FPU); \
2113 return; \
2114 } \
2115 /* NIP cannot be restored if the memory exception comes from a helper */ \
2116 gen_update_nip(ctx, ctx->nip - 4); \
2117 gen_reset_fpstatus(); \
2118 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2119 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2120 set_fprf, Rc(ctx->opcode) != 0); \
2121 }
2122
2123 /* fadd - fadds */
2124 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
2125 /* fdiv - fdivs */
2126 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
2127 /* fmul - fmuls */
2128 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
2129
2130 /* fre */
2131 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
2132
2133 /* fres */
2134 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
2135
2136 /* frsqrte */
2137 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2138
2139 /* frsqrtes */
2140 static void gen_frsqrtes(DisasContext *ctx)
2141 {
2142 if (unlikely(!ctx->fpu_enabled)) {
2143 gen_exception(ctx, POWERPC_EXCP_FPU);
2144 return;
2145 }
2146 /* NIP cannot be restored if the memory exception comes from a helper */
2147 gen_update_nip(ctx, ctx->nip - 4);
2148 gen_reset_fpstatus();
2149 gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2150 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
2151 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2152 }
2153
2154 /* fsel */
2155 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
2156 /* fsub - fsubs */
2157 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
2158 /* Optional: */
2159
2160 /* fsqrt */
2161 static void gen_fsqrt(DisasContext *ctx)
2162 {
2163 if (unlikely(!ctx->fpu_enabled)) {
2164 gen_exception(ctx, POWERPC_EXCP_FPU);
2165 return;
2166 }
2167 /* NIP cannot be restored if the memory exception comes from a helper */
2168 gen_update_nip(ctx, ctx->nip - 4);
2169 gen_reset_fpstatus();
2170 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2171 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2172 }
2173
2174 static void gen_fsqrts(DisasContext *ctx)
2175 {
2176 if (unlikely(!ctx->fpu_enabled)) {
2177 gen_exception(ctx, POWERPC_EXCP_FPU);
2178 return;
2179 }
2180 /* NIP cannot be restored if the memory exception comes from a helper */
2181 gen_update_nip(ctx, ctx->nip - 4);
2182 gen_reset_fpstatus();
2183 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2184 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
2185 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2186 }
2187
2188 /*** Floating-Point multiply-and-add ***/
2189 /* fmadd - fmadds */
2190 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
2191 /* fmsub - fmsubs */
2192 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
2193 /* fnmadd - fnmadds */
2194 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
2195 /* fnmsub - fnmsubs */
2196 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
2197
2198 /*** Floating-Point round & convert ***/
2199 /* fctiw */
2200 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
2201 /* fctiwz */
2202 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
2203 /* frsp */
2204 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
2205 #if defined(TARGET_PPC64)
2206 /* fcfid */
2207 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
2208 /* fctid */
2209 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
2210 /* fctidz */
2211 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
2212 #endif
2213
2214 /* frin */
2215 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
2216 /* friz */
2217 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
2218 /* frip */
2219 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
2220 /* frim */
2221 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2222
2223 /*** Floating-Point compare ***/
2224
2225 /* fcmpo */
2226 static void gen_fcmpo(DisasContext *ctx)
2227 {
2228 TCGv_i32 crf;
2229 if (unlikely(!ctx->fpu_enabled)) {
2230 gen_exception(ctx, POWERPC_EXCP_FPU);
2231 return;
2232 }
2233 /* NIP cannot be restored if the memory exception comes from a helper */
2234 gen_update_nip(ctx, ctx->nip - 4);
2235 gen_reset_fpstatus();
2236 crf = tcg_const_i32(crfD(ctx->opcode));
2237 gen_helper_fcmpo(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
2238 tcg_temp_free_i32(crf);
2239 gen_helper_float_check_status();
2240 }
2241
2242 /* fcmpu */
2243 static void gen_fcmpu(DisasContext *ctx)
2244 {
2245 TCGv_i32 crf;
2246 if (unlikely(!ctx->fpu_enabled)) {
2247 gen_exception(ctx, POWERPC_EXCP_FPU);
2248 return;
2249 }
2250 /* NIP cannot be restored if the memory exception comes from a helper */
2251 gen_update_nip(ctx, ctx->nip - 4);
2252 gen_reset_fpstatus();
2253 crf = tcg_const_i32(crfD(ctx->opcode));
2254 gen_helper_fcmpu(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
2255 tcg_temp_free_i32(crf);
2256 gen_helper_float_check_status();
2257 }
2258
2259 /*** Floating-Point move ***/
2260 /* fabs */
2261 /* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
2262 GEN_FLOAT_B(abs, 0x08, 0x08, 0, PPC_FLOAT);
2263
2264 /* fmr - fmr. */
2265 /* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
2266 static void gen_fmr(DisasContext *ctx)
2267 {
2268 if (unlikely(!ctx->fpu_enabled)) {
2269 gen_exception(ctx, POWERPC_EXCP_FPU);
2270 return;
2271 }
2272 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2273 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2274 }
2275
2276 /* fnabs */
2277 /* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
2278 GEN_FLOAT_B(nabs, 0x08, 0x04, 0, PPC_FLOAT);
2279 /* fneg */
2280 /* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
2281 GEN_FLOAT_B(neg, 0x08, 0x01, 0, PPC_FLOAT);
2282
2283 /*** Floating-Point status & ctrl register ***/
2284
2285 /* mcrfs */
2286 static void gen_mcrfs(DisasContext *ctx)
2287 {
2288 int bfa;
2289
2290 if (unlikely(!ctx->fpu_enabled)) {
2291 gen_exception(ctx, POWERPC_EXCP_FPU);
2292 return;
2293 }
2294 bfa = 4 * (7 - crfS(ctx->opcode));
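/* FPSCR fields are numbered from the most-significant end, so field
 * crfS starts bfa bits above the LSB: copy it into CR[crfD], then
 * clear the field in FPSCR. */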
2295 tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
2296 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2297 tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
2298 }
2299
2300 /* mffs */
2301 static void gen_mffs(DisasContext *ctx)
2302 {
2303 if (unlikely(!ctx->fpu_enabled)) {
2304 gen_exception(ctx, POWERPC_EXCP_FPU);
2305 return;
2306 }
2307 gen_reset_fpstatus();
2308 tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
2309 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2310 }
2311
2312 /* mtfsb0 */
2313 static void gen_mtfsb0(DisasContext *ctx)
2314 {
2315 uint8_t crb;
2316
2317 if (unlikely(!ctx->fpu_enabled)) {
2318 gen_exception(ctx, POWERPC_EXCP_FPU);
2319 return;
2320 }
2321 crb = 31 - crbD(ctx->opcode);
2322 gen_reset_fpstatus();
2323 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
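/* FPSCR[FEX] and FPSCR[VX] are derived from the other exception bits
 * and cannot be cleared directly. */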
2324 TCGv_i32 t0;
2325 /* NIP cannot be restored if the memory exception comes from a helper */
2326 gen_update_nip(ctx, ctx->nip - 4);
2327 t0 = tcg_const_i32(crb);
2328 gen_helper_fpscr_clrbit(t0);
2329 tcg_temp_free_i32(t0);
2330 }
2331 if (unlikely(Rc(ctx->opcode) != 0)) {
2332 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2333 }
2334 }
2335
2336 /* mtfsb1 */
2337 static void gen_mtfsb1(DisasContext *ctx)
2338 {
2339 uint8_t crb;
2340
2341 if (unlikely(!ctx->fpu_enabled)) {
2342 gen_exception(ctx, POWERPC_EXCP_FPU);
2343 return;
2344 }
2345 crb = 31 - crbD(ctx->opcode);
2346 gen_reset_fpstatus();
2347 /* XXX: we pretend we can only do IEEE floating-point computations */
2348 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
2349 TCGv_i32 t0;
2350 /* NIP cannot be restored if the memory exception comes from a helper */
2351 gen_update_nip(ctx, ctx->nip - 4);
2352 t0 = tcg_const_i32(crb);
2353 gen_helper_fpscr_setbit(t0);
2354 tcg_temp_free_i32(t0);
2355 }
2356 if (unlikely(Rc(ctx->opcode) != 0)) {
2357 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2358 }
2359 /* We can raise a deferred exception */
2360 gen_helper_float_check_status();
2361 }
2362
2363 /* mtfsf */
2364 static void gen_mtfsf(DisasContext *ctx)
2365 {
2366 TCGv_i32 t0;
2367 int L = ctx->opcode & 0x02000000;
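/* When the L bit is set, all eight FPSCR fields are written,
 * regardless of the FM field. */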
2368
2369 if (unlikely(!ctx->fpu_enabled)) {
2370 gen_exception(ctx, POWERPC_EXCP_FPU);
2371 return;
2372 }
2373 /* NIP cannot be restored if the memory exception comes from a helper */
2374 gen_update_nip(ctx, ctx->nip - 4);
2375 gen_reset_fpstatus();
2376 if (L)
2377 t0 = tcg_const_i32(0xff);
2378 else
2379 t0 = tcg_const_i32(FM(ctx->opcode));
2380 gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
2381 tcg_temp_free_i32(t0);
2382 if (unlikely(Rc(ctx->opcode) != 0)) {
2383 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2384 }
2385 /* We can raise a deferred exception */
2386 gen_helper_float_check_status();
2387 }
2388
2389 /* mtfsfi */
2390 static void gen_mtfsfi(DisasContext *ctx)
2391 {
2392 int bf, sh;
2393 TCGv_i64 t0;
2394 TCGv_i32 t1;
2395
2396 if (unlikely(!ctx->fpu_enabled)) {
2397 gen_exception(ctx, POWERPC_EXCP_FPU);
2398 return;
2399 }
2400 bf = crbD(ctx->opcode) >> 2;
2401 sh = 7 - bf;
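/* Field bf counts from the most-significant end of FPSCR: shift the
 * immediate into place and pass the matching one-bit field mask to
 * the store helper. */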
2402 /* NIP cannot be restored if the memory exception comes from a helper */
2403 gen_update_nip(ctx, ctx->nip - 4);
2404 gen_reset_fpstatus();
2405 t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
2406 t1 = tcg_const_i32(1 << sh);
2407 gen_helper_store_fpscr(t0, t1);
2408 tcg_temp_free_i64(t0);
2409 tcg_temp_free_i32(t1);
2410 if (unlikely(Rc(ctx->opcode) != 0)) {
2411 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2412 }
2413 /* We can raise a deferred exception */
2414 gen_helper_float_check_status();
2415 }
2416
2417 /*** Addressing modes ***/
2418 /* Register indirect with immediate index: EA = (rA|0) + SIMM */
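/* A non-zero maskl clears the low-order displacement bits that belong
 * to the opcode in DS-form (0x03) and DQ-form (0x0F) encodings. */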
2419 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2420 target_long maskl)
2421 {
2422 target_long simm = SIMM(ctx->opcode);
2423
2424 simm &= ~maskl;
2425 if (rA(ctx->opcode) == 0) {
2426 #if defined(TARGET_PPC64)
2427 if (!ctx->sf_mode) {
2428 tcg_gen_movi_tl(EA, (uint32_t)simm);
2429 } else
2430 #endif
2431 tcg_gen_movi_tl(EA, simm);
2432 } else if (likely(simm != 0)) {
2433 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2434 #if defined(TARGET_PPC64)
2435 if (!ctx->sf_mode) {
2436 tcg_gen_ext32u_tl(EA, EA);
2437 }
2438 #endif
2439 } else {
2440 #if defined(TARGET_PPC64)
2441 if (!ctx->sf_mode) {
2442 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2443 } else
2444 #endif
2445 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2446 }
2447 }
2448
2449 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2450 {
2451 if (rA(ctx->opcode) == 0) {
2452 #if defined(TARGET_PPC64)
2453 if (!ctx->sf_mode) {
2454 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2455 } else
2456 #endif
2457 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2458 } else {
2459 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2460 #if defined(TARGET_PPC64)
2461 if (!ctx->sf_mode) {
2462 tcg_gen_ext32u_tl(EA, EA);
2463 }
2464 #endif
2465 }
2466 }
2467
2468 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2469 {
2470 if (rA(ctx->opcode) == 0) {
2471 tcg_gen_movi_tl(EA, 0);
2472 } else {
2473 #if defined(TARGET_PPC64)
2474 if (!ctx->sf_mode) {
2475 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2476 } else
2477 #endif
2478 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2479 }
2480 }
2481
2482 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2483 target_long val)
2484 {
2485 tcg_gen_addi_tl(ret, arg1, val);
2486 #if defined(TARGET_PPC64)
2487 if (!ctx->sf_mode) {
2488 tcg_gen_ext32u_tl(ret, ret);
2489 }
2490 #endif
2491 }
2492
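/* Raise an alignment interrupt when any EA bit selected by mask is
 * set; aligned addresses branch over the exception. */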
2493 static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
2494 {
2495 int l1 = gen_new_label();
2496 TCGv t0 = tcg_temp_new();
2497 TCGv_i32 t1, t2;
2498 /* NIP cannot be restored if the memory exception comes from a helper */
2499 gen_update_nip(ctx, ctx->nip - 4);
2500 tcg_gen_andi_tl(t0, EA, mask);
2501 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
2502 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
2503 t2 = tcg_const_i32(0);
2504 gen_helper_raise_exception_err(t1, t2);
2505 tcg_temp_free_i32(t1);
2506 tcg_temp_free_i32(t2);
2507 gen_set_label(l1);
2508 tcg_temp_free(t0);
2509 }
2510
2511 /*** Integer load ***/
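/* PowerPC is big-endian by default, so the gen_qemu_* accessors
 * byte-swap when the CPU runs in little-endian mode. */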
2512 static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2513 {
2514 tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
2515 }
2516
2517 static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2518 {
2519 tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
2520 }
2521
2522 static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2523 {
2524 tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
2525 if (unlikely(ctx->le_mode)) {
2526 tcg_gen_bswap16_tl(arg1, arg1);
2527 }
2528 }
2529
2530 static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2531 {
2532 if (unlikely(ctx->le_mode)) {
2533 tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
2534 tcg_gen_bswap16_tl(arg1, arg1);
2535 tcg_gen_ext16s_tl(arg1, arg1);
2536 } else {
2537 tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
2538 }
2539 }
2540
2541 static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2542 {
2543 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2544 if (unlikely(ctx->le_mode)) {
2545 tcg_gen_bswap32_tl(arg1, arg1);
2546 }
2547 }
2548
2549 #if defined(TARGET_PPC64)
2550 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2551 {
2552 if (unlikely(ctx->le_mode)) {
2553 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2554 tcg_gen_bswap32_tl(arg1, arg1);
2555 tcg_gen_ext32s_tl(arg1, arg1);
2556 } else
2557 tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
2558 }
2559 #endif
2560
2561 static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2562 {
2563 tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
2564 if (unlikely(ctx->le_mode)) {
2565 tcg_gen_bswap64_i64(arg1, arg1);
2566 }
2567 }
2568
2569 static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
2570 {
2571 tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
2572 }
2573
2574 static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
2575 {
2576 if (unlikely(ctx->le_mode)) {
2577 TCGv t0 = tcg_temp_new();
2578 tcg_gen_ext16u_tl(t0, arg1);
2579 tcg_gen_bswap16_tl(t0, t0);
2580 tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
2581 tcg_temp_free(t0);
2582 } else {
2583 tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
2584 }
2585 }
2586
2587 static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
2588 {
2589 if (unlikely(ctx->le_mode)) {
2590 TCGv t0 = tcg_temp_new();
2591 tcg_gen_ext32u_tl(t0, arg1);
2592 tcg_gen_bswap32_tl(t0, t0);
2593 tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
2594 tcg_temp_free(t0);
2595 } else {
2596 tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
2597 }
2598 }
2599
2600 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2601 {
2602 if (unlikely(ctx->le_mode)) {
2603 TCGv_i64 t0 = tcg_temp_new_i64();
2604 tcg_gen_bswap64_i64(t0, arg1);
2605 tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
2606 tcg_temp_free_i64(t0);
2607 } else
2608 tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
2609 }
2610
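/* Macro generators for the integer load family: GEN_LD is the D-form
 * variant, GEN_LDU additionally writes the EA back into rA, and
 * GEN_LDX/GEN_LDUX are the X-form indexed variants. */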
2611 #define GEN_LD(name, ldop, opc, type) \
2612 static void glue(gen_, name)(DisasContext *ctx) \
2613 { \
2614 TCGv EA; \
2615 gen_set_access_type(ctx, ACCESS_INT); \
2616 EA = tcg_temp_new(); \
2617 gen_addr_imm_index(ctx, EA, 0); \
2618 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2619 tcg_temp_free(EA); \
2620 }
2621
2622 #define GEN_LDU(name, ldop, opc, type) \
2623 static void glue(gen_, name##u)(DisasContext *ctx) \
2624 { \
2625 TCGv EA; \
2626 if (unlikely(rA(ctx->opcode) == 0 || \
2627 rA(ctx->opcode) == rD(ctx->opcode))) { \
2628 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2629 return; \
2630 } \
2631 gen_set_access_type(ctx, ACCESS_INT); \
2632 EA = tcg_temp_new(); \
2633 if (type == PPC_64B) \
2634 gen_addr_imm_index(ctx, EA, 0x03); \
2635 else \
2636 gen_addr_imm_index(ctx, EA, 0); \
2637 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2638 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2639 tcg_temp_free(EA); \
2640 }
2641
2642 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2643 static void glue(gen_, name##ux)(DisasContext *ctx) \
2644 { \
2645 TCGv EA; \
2646 if (unlikely(rA(ctx->opcode) == 0 || \
2647 rA(ctx->opcode) == rD(ctx->opcode))) { \
2648 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2649 return; \
2650 } \
2651 gen_set_access_type(ctx, ACCESS_INT); \
2652 EA = tcg_temp_new(); \
2653 gen_addr_reg_index(ctx, EA); \
2654 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2655 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2656 tcg_temp_free(EA); \
2657 }
2658
2659 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2660 static void glue(gen_, name##x)(DisasContext *ctx) \
2661 { \
2662 TCGv EA; \
2663 gen_set_access_type(ctx, ACCESS_INT); \
2664 EA = tcg_temp_new(); \
2665 gen_addr_reg_index(ctx, EA); \
2666 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2667 tcg_temp_free(EA); \
2668 }
2669
2670 #define GEN_LDS(name, ldop, op, type) \
2671 GEN_LD(name, ldop, op | 0x20, type); \
2672 GEN_LDU(name, ldop, op | 0x21, type); \
2673 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2674 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2675
2676 /* lbz lbzu lbzux lbzx */
2677 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2678 /* lha lhau lhaux lhax */
2679 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2680 /* lhz lhzu lhzux lhzx */
2681 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2682 /* lwz lwzu lwzux lwzx */
2683 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2684 #if defined(TARGET_PPC64)
2685 /* lwaux */
2686 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2687 /* lwax */
2688 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2689 /* ldux */
2690 GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
2691 /* ldx */
2692 GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2693
2694 static void gen_ld(DisasContext *ctx)
2695 {
2696 TCGv EA;
2697 if (Rc(ctx->opcode)) {
2698 if (unlikely(rA(ctx->opcode) == 0 ||
2699 rA(ctx->opcode) == rD(ctx->opcode))) {
2700 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2701 return;
2702 }
2703 }
2704 gen_set_access_type(ctx, ACCESS_INT);
2705 EA = tcg_temp_new();
2706 gen_addr_imm_index(ctx, EA, 0x03);
2707 if (ctx->opcode & 0x02) {
2708 /* lwa (lwau is undefined) */
2709 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2710 } else {
2711 /* ld - ldu */
2712 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2713 }
2714 if (Rc(ctx->opcode))
2715 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2716 tcg_temp_free(EA);
2717 }
2718
2719 /* lq */
2720 static void gen_lq(DisasContext *ctx)
2721 {
2722 #if defined(CONFIG_USER_ONLY)
2723 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2724 #else
2725 int ra, rd;
2726 TCGv EA;
2727
2728 /* lq is privileged: reject it in user mode */
2729 if (unlikely(ctx->mem_idx == 0)) {
2730 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2731 return;
2732 }
2733 ra = rA(ctx->opcode);
2734 rd = rD(ctx->opcode);
2735 if (unlikely((rd & 1) || rd == ra)) {
2736 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2737 return;
2738 }
2739 if (unlikely(ctx->le_mode)) {
2740 /* Little-endian mode is not handled */
2741 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2742 return;
2743 }
2744 gen_set_access_type(ctx, ACCESS_INT);
2745 EA = tcg_temp_new();
2746 gen_addr_imm_index(ctx, EA, 0x0F);
2747 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2748 gen_addr_add(ctx, EA, EA, 8);
2749 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2750 tcg_temp_free(EA);
2751 #endif
2752 }
2753 #endif
2754
2755 /*** Integer store ***/
2756 #define GEN_ST(name, stop, opc, type) \
2757 static void glue(gen_, name)(DisasContext *ctx) \
2758 { \
2759 TCGv EA; \
2760 gen_set_access_type(ctx, ACCESS_INT); \
2761 EA = tcg_temp_new(); \
2762 gen_addr_imm_index(ctx, EA, 0); \
2763 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2764 tcg_temp_free(EA); \
2765 }
2766
2767 #define GEN_STU(name, stop, opc, type) \
2768 static void glue(gen_, stop##u)(DisasContext *ctx) \
2769 { \
2770 TCGv EA; \
2771 if (unlikely(rA(ctx->opcode) == 0)) { \
2772 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2773 return; \
2774 } \
2775 gen_set_access_type(ctx, ACCESS_INT); \
2776 EA = tcg_temp_new(); \
2777 if (type == PPC_64B) \
2778 gen_addr_imm_index(ctx, EA, 0x03); \
2779 else \
2780 gen_addr_imm_index(ctx, EA, 0); \
2781 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2782 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2783 tcg_temp_free(EA); \
2784 }
2785
2786 #define GEN_STUX(name, stop, opc2, opc3, type) \
2787 static void glue(gen_, name##ux)(DisasContext *ctx) \
2788 { \
2789 TCGv EA; \
2790 if (unlikely(rA(ctx->opcode) == 0)) { \
2791 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2792 return; \
2793 } \
2794 gen_set_access_type(ctx, ACCESS_INT); \
2795 EA = tcg_temp_new(); \
2796 gen_addr_reg_index(ctx, EA); \
2797 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2798 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2799 tcg_temp_free(EA); \
2800 }
2801
2802 #define GEN_STX(name, stop, opc2, opc3, type) \
2803 static void glue(gen_, name##x)(DisasContext *ctx) \
2804 { \
2805 TCGv EA; \
2806 gen_set_access_type(ctx, ACCESS_INT); \
2807 EA = tcg_temp_new(); \
2808 gen_addr_reg_index(ctx, EA); \
2809 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2810 tcg_temp_free(EA); \
2811 }
2812
2813 #define GEN_STS(name, stop, op, type) \
2814 GEN_ST(name, stop, op | 0x20, type); \
2815 GEN_STU(name, stop, op | 0x21, type); \
2816 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
2817 GEN_STX(name, stop, 0x17, op | 0x00, type)
2818
2819 /* stb stbu stbux stbx */
2820 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
2821 /* sth sthu sthux sthx */
2822 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
2823 /* stw stwu stwux stwx */
2824 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
2825 #if defined(TARGET_PPC64)
2826 GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
2827 GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
2828
2829 static void gen_std(DisasContext *ctx)
2830 {
2831 int rs;
2832 TCGv EA;
2833
2834 rs = rS(ctx->opcode);
2835 if ((ctx->opcode & 0x3) == 0x2) {
2836 #if defined(CONFIG_USER_ONLY)
2837 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2838 #else
2839 /* stq */
2840 if (unlikely(ctx->mem_idx == 0)) {
2841 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2842 return;
2843 }
2844 if (unlikely(rs & 1)) {
2845 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2846 return;
2847 }
2848 if (unlikely(ctx->le_mode)) {
2849 /* Little-endian mode is not handled */
2850 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2851 return;
2852 }
2853 gen_set_access_type(ctx, ACCESS_INT);
2854 EA = tcg_temp_new();
2855 gen_addr_imm_index(ctx, EA, 0x03);
2856 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
2857 gen_addr_add(ctx, EA, EA, 8);
2858 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
2859 tcg_temp_free(EA);
2860 #endif
2861 } else {
2862 /* std / stdu */
2863 if (Rc(ctx->opcode)) {
2864 if (unlikely(rA(ctx->opcode) == 0)) {
2865 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2866 return;
2867 }
2868 }
2869 gen_set_access_type(ctx, ACCESS_INT);
2870 EA = tcg_temp_new();
2871 gen_addr_imm_index(ctx, EA, 0x03);
2872 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
2873 if (Rc(ctx->opcode))
2874 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2875 tcg_temp_free(EA);
2876 }
2877 }
2878 #endif
2879 /*** Integer load and store with byte reverse ***/
2880 /* lhbrx */
2881 static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
2882 {
2883 tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
2884 if (likely(!ctx->le_mode)) {
2885 tcg_gen_bswap16_tl(arg1, arg1);
2886 }
2887 }
2888 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
2889
2890 /* lwbrx */
2891 static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
2892 {
2893 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2894 if (likely(!ctx->le_mode)) {
2895 tcg_gen_bswap32_tl(arg1, arg1);
2896 }
2897 }
2898 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
2899
2900 /* sthbrx */
2901 static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
2902 {
2903 if (likely(!ctx->le_mode)) {
2904 TCGv t0 = tcg_temp_new();
2905 tcg_gen_ext16u_tl(t0, arg1);
2906 tcg_gen_bswap16_tl(t0, t0);
2907 tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
2908 tcg_temp_free(t0);
2909 } else {
2910 tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
2911 }
2912 }
2913 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
2914
2915 /* stwbrx */
2916 static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
2917 {
2918 if (likely(!ctx->le_mode)) {
2919 TCGv t0 = tcg_temp_new();
2920 tcg_gen_ext32u_tl(t0, arg1);
2921 tcg_gen_bswap32_tl(t0, t0);
2922 tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
2923 tcg_temp_free(t0);
2924 } else {
2925 tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
2926 }
2927 }
2928 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
2929
2930 /*** Integer load and store multiple ***/
2931
2932 /* lmw */
2933 static void gen_lmw(DisasContext *ctx)
2934 {
2935 TCGv t0;
2936 TCGv_i32 t1;
2937 gen_set_access_type(ctx, ACCESS_INT);
2938 /* NIP cannot be restored if the memory exception comes from a helper */
2939 gen_update_nip(ctx, ctx->nip - 4);
2940 t0 = tcg_temp_new();
2941 t1 = tcg_const_i32(rD(ctx->opcode));
2942 gen_addr_imm_index(ctx, t0, 0);
2943 gen_helper_lmw(t0, t1);
2944 tcg_temp_free(t0);
2945 tcg_temp_free_i32(t1);
2946 }
2947
2948 /* stmw */
2949 static void gen_stmw(DisasContext *ctx)
2950 {
2951 TCGv t0;
2952 TCGv_i32 t1;
2953 gen_set_access_type(ctx, ACCESS_INT);
2954 /* NIP cannot be restored if the memory exception comes from a helper */
2955 gen_update_nip(ctx, ctx->nip - 4);
2956 t0 = tcg_temp_new();
2957 t1 = tcg_const_i32(rS(ctx->opcode));
2958 gen_addr_imm_index(ctx, t0, 0);
2959 gen_helper_stmw(t0, t1);
2960 tcg_temp_free(t0);
2961 tcg_temp_free_i32(t1);
2962 }
2963
2964 /*** Integer load and store strings ***/
2965
2966 /* lswi */
2967 /* The PowerPC32 specification says we must generate an exception if
2968  * rA is in the range of registers to be loaded.
2969  * On the other hand, IBM says this is valid, but rA won't be loaded.
2970  * For now, I'll follow the spec...
2971  */
2972 static void gen_lswi(DisasContext *ctx)
2973 {
2974 TCGv t0;
2975 TCGv_i32 t1, t2;
2976 int nb = NB(ctx->opcode);
2977 int start = rD(ctx->opcode);
2978 int ra = rA(ctx->opcode);
2979 int nr;
2980
2981 if (nb == 0)
2982 nb = 32;
2983 nr = (nb + 3) / 4;  /* ceil(nb / 4) registers are loaded */
2984 if (unlikely(((start + nr) > 32 &&
2985 start <= ra && (start + nr - 32) > ra) ||
2986 ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
2987 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
2988 return;
2989 }
2990 gen_set_access_type(ctx, ACCESS_INT);
2991 /* NIP cannot be restored if the memory exception comes from a helper */
2992 gen_update_nip(ctx, ctx->nip - 4);
2993 t0 = tcg_temp_new();
2994 gen_addr_register(ctx, t0);
2995 t1 = tcg_const_i32(nb);
2996 t2 = tcg_const_i32(start);
2997 gen_helper_lsw(t0, t1, t2);
2998 tcg_temp_free(t0);
2999 tcg_temp_free_i32(t1);
3000 tcg_temp_free_i32(t2);
3001 }
3002
3003 /* lswx */
3004 static void gen_lswx(DisasContext *ctx)
3005 {
3006 TCGv t0;
3007 TCGv_i32 t1, t2, t3;
3008 gen_set_access_type(ctx, ACCESS_INT);
3009 /* NIP cannot be restored if the memory exception comes from a helper */
3010 gen_update_nip(ctx, ctx->nip - 4);
3011 t0 = tcg_temp_new();
3012 gen_addr_reg_index(ctx, t0);
3013 t1 = tcg_const_i32(rD(ctx->opcode));
3014 t2 = tcg_const_i32(rA(ctx->opcode));
3015 t3 = tcg_const_i32(rB(ctx->opcode));
3016 gen_helper_lswx(t0, t1, t2, t3);
3017 tcg_temp_free(t0);
3018 tcg_temp_free_i32(t1);
3019 tcg_temp_free_i32(t2);
3020 tcg_temp_free_i32(t3);
3021 }
3022
3023 /* stswi */
3024 static void gen_stswi(DisasContext *ctx)
3025 {
3026 TCGv t0;
3027 TCGv_i32 t1, t2;
3028 int nb = NB(ctx->opcode);
3029 gen_set_access_type(ctx, ACCESS_INT);
3030 /* NIP cannot be restored if the memory exception comes from a helper */
3031 gen_update_nip(ctx, ctx->nip - 4);
3032 t0 = tcg_temp_new();
3033 gen_addr_register(ctx, t0);
3034 if (nb == 0)
3035 nb = 32;
3036 t1 = tcg_const_i32(nb);
3037 t2 = tcg_const_i32(rS(ctx->opcode));
3038 gen_helper_stsw(t0, t1, t2);
3039 tcg_temp_free(t0);
3040 tcg_temp_free_i32(t1);
3041 tcg_temp_free_i32(t2);
3042 }
3043
3044 /* stswx */
3045 static void gen_stswx(DisasContext *ctx)
3046 {
3047 TCGv t0;
3048 TCGv_i32 t1, t2;
3049 gen_set_access_type(ctx, ACCESS_INT);
3050 /* NIP cannot be restored if the memory exception comes from a helper */
3051 gen_update_nip(ctx, ctx->nip - 4);
3052 t0 = tcg_temp_new();
3053 gen_addr_reg_index(ctx, t0);
3054 t1 = tcg_temp_new_i32();
3055 tcg_gen_trunc_tl_i32(t1, cpu_xer);
3056 tcg_gen_andi_i32(t1, t1, 0x7F);
3057 t2 = tcg_const_i32(rS(ctx->opcode));
3058 gen_helper_stsw(t0, t1, t2);
3059 tcg_temp_free(t0);
3060 tcg_temp_free_i32(t1);
3061 tcg_temp_free_i32(t2);
3062 }
3063
3064 /*** Memory synchronisation ***/
3065 /* eieio */
3066 static void gen_eieio(DisasContext *ctx)
3067 {
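/* TCG executes a thread's memory accesses in program order, so eieio
 * can be treated as a no-op. */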
3068 }
3069
3070 /* isync */
3071 static void gen_isync(DisasContext *ctx)
3072 {
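/* End the translation block so that the effects of preceding
 * context-altering instructions become visible. */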
3073 gen_stop_exception(ctx);
3074 }
3075
3076 /* lwarx */
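/* Load the word and establish a reservation: the EA is kept in
 * cpu_reserve and the loaded value in reserve_val, so that stwcx. can
 * later check whether the reservation still holds. */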
3077 static void gen_lwarx(DisasContext *ctx)
3078 {
3079 TCGv t0;
3080 TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3081 gen_set_access_type(ctx, ACCESS_RES);
3082 t0 = tcg_temp_local_new();
3083 gen_addr_reg_index(ctx, t0);
3084 gen_check_align(ctx, t0, 0x03);
3085 gen_qemu_ld32u(ctx, gpr, t0);
3086 tcg_gen_mov_tl(cpu_reserve, t0);
3087 tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
3088 tcg_temp_free(t0);
3089 }
3090
3091 #if defined(CONFIG_USER_ONLY)
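/* In user-only mode the conditional store is resolved outside the TB:
 * record the EA, access size and register number, then raise
 * POWERPC_EXCP_STCX so the exception handler can emulate the store. */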
3092 static void gen_conditional_store (DisasContext *ctx, TCGv EA,
3093 int reg, int size)
3094 {
3095 TCGv t0 = tcg_temp_new();
3096 uint32_t save_exception = ctx->exception;
3097
3098 tcg_gen_st_tl(EA, cpu_env, offsetof(CPUState, reserve_ea));
3099 tcg_gen_movi_tl(t0, (size << 5) | reg);
3100 tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, reserve_info));
3101 tcg_temp_free(t0);
3102 gen_update_nip(ctx, ctx->nip-4);
3103 ctx->exception = POWERPC_EXCP_BRANCH;
3104 gen_exception(ctx, POWERPC_EXCP_STCX);
3105 ctx->exception = save_exception;
3106 }
3107 #endif
3108
3109 /* stwcx. */
3110 static void gen_stwcx_(DisasContext *ctx)
3111 {
3112 TCGv t0;
3113 gen_set_access_type(ctx, ACCESS_RES);
3114 t0 = tcg_temp_local_new();
3115 gen_addr_reg_index(ctx, t0);
3116 gen_check_align(ctx, t0, 0x03);
3117 #if defined(CONFIG_USER_ONLY)
3118 gen_conditional_store(ctx, t0, rS(ctx->opcode), 4);
3119 #else
3120 {
3121 int l1;
3122
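/* Prime CR0 with XER[SO]; EQ is set further down only if the
 * reservation still holds and the store succeeds. */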
3123 tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
3124 tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
3125 tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
3126 l1 = gen_new_label();