PPC: Add another 64 bits to instruction feature mask
[qemu.git] target-ppc/translate.c
/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "host-utils.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instruction classes and implementation flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers */

/* global register indexes */
static TCGv_ptr cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
#if !defined(TARGET_PPC64)
    + 10*4 + 22*5 /* SPE GPRh */
#endif
    + 10*4 + 22*5 /* FPR */
    + 2*(10*6 + 22*7) /* AVRh, AVRl */
    + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
#if !defined(TARGET_PPC64)
static TCGv cpu_gprh[32];
#endif
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
static TCGv cpu_xer;
static TCGv cpu_reserve;
static TCGv_i32 cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "gen-icount.h"

void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
#if !defined(TARGET_PPC64)
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
#endif

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
    }

    cpu_nip = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, nip), "nip");

    cpu_msr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUState, lr), "lr");

    cpu_xer = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, xer), "xer");

    cpu_reserve = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUState, access_type), "access_type");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

/* internal defines */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    int mem_idx;
    int access_type;
    /* Translation flags */
    int le_mode;
#if defined(TARGET_PPC64)
    int sf_mode;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int spe_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
} DisasContext;

struct opc_handler_t {
    /* invalid bits */
    uint32_t inval;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};

static inline void gen_reset_fpstatus(void)
{
#ifdef CONFIG_SOFTFLOAT
    gen_helper_reset_fpstatus();
#endif
}

static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    if (set_fprf != 0) {
        /* This case might be optimized later */
        tcg_gen_movi_i32(t0, 1);
        gen_helper_compute_fprf(t0, arg, t0);
        if (unlikely(set_rc)) {
            tcg_gen_mov_i32(cpu_crf[1], t0);
        }
        gen_helper_float_check_status();
    } else if (unlikely(set_rc)) {
        /* We always need to compute fpcc */
        tcg_gen_movi_i32(t0, 0);
        gen_helper_compute_fprf(t0, arg, t0);
        tcg_gen_mov_i32(cpu_crf[1], t0);
    }

    tcg_temp_free_i32(t0);
}

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->sf_mode)
        tcg_gen_movi_tl(cpu_nip, nip);
    else
#endif
        tcg_gen_movi_tl(cpu_nip, (uint32_t)nip);
}

static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (ctx->exception != POWERPC_EXCP_BRANCH)
        gen_update_nip(ctx, ctx->nip);
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}

/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}

#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
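
/* Illustrative note: with one byte each for opc1..opc3, the explicit pad[]
 * puts the handler member at offset 8 on 64-bit hosts (3 + 5) and at
 * offset 4 on 32-bit hosts (3 + 1), so opcode_t stays naturally aligned
 * without relying on compiler-inserted padding. */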

/*****************************************************************************/
/*** Instruction decoding ***/
#define EXTRACT_HELPER(name, shift, nb) \
static inline uint32_t name(uint32_t opcode) \
{ \
    return (opcode >> (shift)) & ((1 << (nb)) - 1); \
}

#define EXTRACT_SHELPER(name, shift, nb) \
static inline int32_t name(uint32_t opcode) \
{ \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
}
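
/* Illustrative expansion: for example, the first use below,
 * EXTRACT_HELPER(opc1, 26, 6), becomes
 *     static inline uint32_t opc1(uint32_t opcode)
 *     { return (opcode >> 26) & 0x3F; }
 * i.e. it pulls the primary opcode out of the top six bits. */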

/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);

    return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
}
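
/* Illustrative note: the architecture stores the low five bits of an SPR
 * number in the high half of the 10-bit instruction field and vice versa,
 * so SPR() swaps the halves back.  E.g. for "mtspr 8,rS" (mtlr), _SPR()
 * yields the raw field 0x100 and SPR() returns 8. */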
/*** Get constants ***/
EXTRACT_HELPER(IMM, 12, 8);
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);
EXTRACT_HELPER(FM, 17, 8);
EXTRACT_HELPER(SR, 16, 4);
EXTRACT_HELPER(FPIMM, 12, 4);

/*** Jump target decoding ***/
/* Displacement */
EXTRACT_SHELPER(d, 0, 16);
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
{
    return (opcode >> 0) & 0x03FFFFFC;
}

static inline uint32_t BD(uint32_t opcode)
{
    return (opcode >> 0) & 0xFFFC;
}

EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);

/* Create a mask between <start> and <end> bits */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
              (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
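
/* Worked examples, using the IBM convention where bit 0 is the MSB: on a
 * 64-bit target MASK(32, 63) is 0x00000000FFFFFFFF and MASK(0, 0) is
 * 0x8000000000000000.  When start > end the mask wraps around, as
 * rlwinm/rldicl allow: MASK(48, 15) == ~MASK(16, 47) == 0xFFFF00000000FFFF. */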

/*****************************************************************************/
/* PowerPC instructions table */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = onam, \
    }, \
    .oname = onam, \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .pad = { 0, }, \
    .handler = { \
        .inval = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = onam, \
}
#endif

/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};

/*** Integer comparison ***/

static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    int l1, l2, l3;

    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_xer);
    tcg_gen_shri_i32(cpu_crf[crf], cpu_crf[crf], XER_SO);
    tcg_gen_andi_i32(cpu_crf[crf], cpu_crf[crf], 1);

    l1 = gen_new_label();
    l2 = gen_new_label();
    l3 = gen_new_label();
    if (s) {
        tcg_gen_brcond_tl(TCG_COND_LT, arg0, arg1, l1);
        tcg_gen_brcond_tl(TCG_COND_GT, arg0, arg1, l2);
    } else {
        tcg_gen_brcond_tl(TCG_COND_LTU, arg0, arg1, l1);
        tcg_gen_brcond_tl(TCG_COND_GTU, arg0, arg1, l2);
    }
    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_EQ);
    tcg_gen_br(l3);
    gen_set_label(l1);
    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_LT);
    tcg_gen_br(l3);
    gen_set_label(l2);
    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_GT);
    gen_set_label(l3);
}
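
/* Illustrative note: each 4-bit CR field holds LT|GT|EQ|SO, with
 * CRF_LT/CRF_GT/CRF_EQ/CRF_SO naming the bit positions (3..0 as defined in
 * cpu.h).  The code above first copies XER[SO] into the SO bit, then sets
 * exactly one of LT/GT/EQ; e.g. a signed compare of -1 against 1 with SO
 * clear leaves the field as 0b1000 (only LT set). */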

static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_local_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_local_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
#endif

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode))
        gen_op_cmpi32(reg, 0, 1, 0);
    else
#endif
        gen_op_cmpi(reg, 0, 1, 0);
}

/* cmp */
static void gen_cmp(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    else
#endif
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    else
#endif
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    else
#endif
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    else
#endif
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
}

/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    int l1, l2;
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask;
    TCGv_i32 t0;

    l1 = gen_new_label();
    l2 = gen_new_label();

    mask = 1 << (3 - (bi & 0x03));
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
    if (rA(ctx->opcode) == 0)
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
    else
        tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    gen_set_label(l2);
    tcg_temp_free_i32(t0);
}
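
/* Illustrative note: isel rD,rA,rB,BI is a conditional move,
 * rD = CR[BI] ? (rA ? GPR[rA] : 0) : GPR[rB].  E.g. "isel r3,r4,r5,0"
 * copies r4 into r3 when CR0[LT] is set and r5 otherwise; BI >> 2 picks
 * the CR field and 3 - (BI & 3) the bit within it. */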

/*** Integer arithmetic ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    int l1;
    TCGv t0;

    l1 = gen_new_label();
    /* Start with XER OV disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    t0 = tcg_temp_local_new();
    tcg_gen_xor_tl(t0, arg0, arg1);
#if defined(TARGET_PPC64)
    if (!ctx->sf_mode)
        tcg_gen_ext32s_tl(t0, t0);
#endif
    if (sub)
        tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
    else
        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
    tcg_gen_xor_tl(t0, arg1, arg2);
#if defined(TARGET_PPC64)
    if (!ctx->sf_mode)
        tcg_gen_ext32s_tl(t0, t0);
#endif
    if (sub)
        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
    else
        tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    gen_set_label(l1);
    tcg_temp_free(t0);
}
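
/* Illustrative note: for an add (sub == 0), arg0 is the result and OV must
 * be set iff both operands have the same sign but the result's sign
 * differs; the two XOR/branch pairs above encode exactly that rule (with
 * the tests inverted for subtraction).  E.g. 0x7FFFFFFF + 1 in 32-bit mode
 * wraps to 0x80000000 and sets XER[OV] and XER[SO]. */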

static inline void gen_op_arith_compute_ca(DisasContext *ctx, TCGv arg1,
                                           TCGv arg2, int sub)
{
    int l1 = gen_new_label();

#if defined(TARGET_PPC64)
    if (!(ctx->sf_mode)) {
        TCGv t0, t1;
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();

        tcg_gen_ext32u_tl(t0, arg1);
        tcg_gen_ext32u_tl(t1, arg2);
        if (sub) {
            tcg_gen_brcond_tl(TCG_COND_GTU, t0, t1, l1);
        } else {
            tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
        }
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        gen_set_label(l1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    } else
#endif
    {
        if (sub) {
            tcg_gen_brcond_tl(TCG_COND_GTU, arg1, arg2, l1);
        } else {
            tcg_gen_brcond_tl(TCG_COND_GEU, arg1, arg2, l1);
        }
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        gen_set_label(l1);
    }
}
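
/* Illustrative note: arg1 is the result and arg2 one of the inputs.  For
 * an add, a carry out happened exactly when the result wrapped below an
 * input (result < input, unsigned), e.g. 0xFFFFFFFF + 1 -> 0 sets XER[CA].
 * For subtraction CA means "no borrow", hence the inverted test (GTU
 * instead of GEU). */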

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, int add_ca, int compute_ca,
                                    int compute_ov)
{
    TCGv t0, t1;

    if ((!compute_ca && !compute_ov) ||
        (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) {
        t0 = ret;
    } else {
        t0 = tcg_temp_local_new();
    }

    if (add_ca) {
        t1 = tcg_temp_local_new();
        tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
        tcg_gen_shri_tl(t1, t1, XER_CA);
    } else {
        TCGV_UNUSED(t1);
    }

    if (compute_ca && compute_ov) {
        /* Start with XER CA and OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
    } else if (compute_ca) {
        /* Start with XER CA disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    } else if (compute_ov) {
        /* Start with XER OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }

    tcg_gen_add_tl(t0, arg1, arg2);

    if (compute_ca) {
        gen_op_arith_compute_ca(ctx, t0, arg1, 0);
    }
    if (add_ca) {
        tcg_gen_add_tl(t0, t0, t1);
        gen_op_arith_compute_ca(ctx, t0, t1, 0);
        tcg_temp_free(t1);
    }
    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, t0);

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
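
/* Illustrative note: for adde-style ops the carry is checked twice, once
 * after arg1 + arg2 and once after adding the old CA bit.  Since
 * gen_op_arith_compute_ca() only ever ORs CA in, the two checks
 * accumulate: CA ends up set if either step wrapped, and at most one of
 * them can wrap per addition. */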
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                     add_ca, compute_ca, compute_ov); \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
                                add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_local_tl(const_val); \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], t0, \
                     add_ca, compute_ca, compute_ov); \
    tcg_temp_free(t0); \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic addic. */
static inline void gen_op_addic(DisasContext *ctx, TCGv ret, TCGv arg1,
                                int compute_Rc0)
{
    target_long simm = SIMM(ctx->opcode);

    /* Start with XER CA disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));

    if (likely(simm != 0)) {
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_addi_tl(t0, arg1, simm);
        gen_op_arith_compute_ca(ctx, t0, arg1, 0);
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_mov_tl(ret, arg1);
    }
    if (compute_Rc0) {
        gen_set_Rc0(ctx, ret);
    }
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}

static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
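
/* Illustrative note: the l1 path handles the boundedly-undefined cases of
 * the ISA, division by zero and INT32_MIN / -1.  This implementation
 * yields 0 for unsigned divides and 0 or -1 (the sign of the dividend)
 * for signed ones, and sets OV/SO when the overflow-enabled form
 * (divwo/divwuo) is used. */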
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
#endif

/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
#if defined(TARGET_PPC64)
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
#else
    tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
#endif
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulhwu mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
#if defined(TARGET_PPC64)
    tcg_gen_ext32u_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32u_i64(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
#else
    tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
#endif
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullw mullw. */
static void gen_mullw(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullwo mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    int l1;
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    l1 = gen_new_label();
    /* Start with XER OV disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
#if defined(TARGET_PPC64)
    tcg_gen_ext32s_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_i64(t1, cpu_gpr[rB(ctx->opcode)]);
#else
    tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
#endif
    tcg_gen_mul_i64(t0, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_ext32s_i64(cpu_gpr[rD(ctx->opcode)], t0);
    tcg_gen_brcond_i64(TCG_COND_EQ, t0, cpu_gpr[rD(ctx->opcode)], l1);
#else
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
    tcg_gen_ext32s_i64(t1, t0);
    tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
#endif
    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    gen_set_label(l1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
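
/* Illustrative note: overflow is detected by doing the multiply at 64 bits
 * and checking whether the product equals its own 32-bit sign extension.
 * E.g. 0x40000000 * 2 = 0x80000000, whose sign extension
 * 0xFFFFFFFF80000000 differs from the 64-bit product, so XER[OV]/XER[SO]
 * are set. */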

/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
#if defined(TARGET_PPC64)
#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
}
/* mulhdu mulhdu. */
GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00);
/* mulhd mulhd. */
GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02);

/* mulld mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mulldo mulldo. */
GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17);
#endif

/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    int ov_check)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();
    TCGv t0 = tcg_temp_local_new();
#if defined(TARGET_PPC64)
    if (ctx->sf_mode) {
        tcg_gen_mov_tl(t0, arg1);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT64_MIN, l1);
    } else
#endif
    {
        tcg_gen_ext32s_tl(t0, arg1);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT32_MIN, l1);
    }
    tcg_gen_neg_tl(ret, arg1);
    if (ov_check) {
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_mov_tl(ret, t0);
    if (ov_check) {
        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
    }
    gen_set_label(l2);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
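
/* Illustrative note: the most negative value has no positive counterpart
 * in two's complement, so "neg" of INT32_MIN (or INT64_MIN in 64-bit mode)
 * returns the operand unchanged, and nego additionally sets
 * XER[OV]/XER[SO]; every other input takes the plain tcg_gen_neg_tl()
 * path. */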

static void gen_neg(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
}

/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int add_ca, int compute_ca,
                                     int compute_ov)
{
    TCGv t0, t1;

    if ((!compute_ca && !compute_ov) ||
        (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) {
        t0 = ret;
    } else {
        t0 = tcg_temp_local_new();
    }

    if (add_ca) {
        t1 = tcg_temp_local_new();
        tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
        tcg_gen_shri_tl(t1, t1, XER_CA);
    } else {
        TCGV_UNUSED(t1);
    }

    if (compute_ca && compute_ov) {
        /* Start with XER CA and OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
    } else if (compute_ca) {
        /* Start with XER CA disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    } else if (compute_ov) {
        /* Start with XER OV disabled, the most likely case */
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
    }

    if (add_ca) {
        tcg_gen_not_tl(t0, arg1);
        tcg_gen_add_tl(t0, t0, arg2);
        gen_op_arith_compute_ca(ctx, t0, arg2, 0);
        tcg_gen_add_tl(t0, t0, t1);
        gen_op_arith_compute_ca(ctx, t0, t1, 0);
        tcg_temp_free(t1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
        if (compute_ca) {
            gen_op_arith_compute_ca(ctx, t0, arg2, 1);
        }
    }
    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, t0);

    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      add_ca, compute_ca, compute_ov); \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
                                 add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_local_tl(const_val); \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], t0, \
                      add_ca, compute_ca, compute_ov); \
    tcg_temp_free(t0); \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    /* Start with XER CA disabled, the most likely case */
    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    TCGv t0 = tcg_temp_local_new();
    TCGv t1 = tcg_const_local_tl(SIMM(ctx->opcode));
    tcg_gen_sub_tl(t0, t1, cpu_gpr[rA(ctx->opcode)]);
    gen_op_arith_compute_ca(ctx, t0, t1, 1);
    tcg_temp_free(t1);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
    tcg_temp_free(t0);
}

/*** Integer logical ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
           cpu_gpr[rB(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the mr (and mr.) case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (ctx->mem_idx > 0) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (ctx->mem_idx > 0) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (ctx->mem_idx > 0) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->mem_idx > 1) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        /* XXX: should handle special NOPs for POWER series */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}

/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif

/*** Integer rotate ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    sh = SH(ctx->opcode);
    if (likely(sh == 0 && mb == 0 && me == 31)) {
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        target_ulong mask;
        TCGv t1;
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t2, t2, sh);
        tcg_gen_extu_i32_i64(t0, t2);
        tcg_temp_free_i32(t2);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        t1 = tcg_temp_new();
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
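
/* Worked example: "rlwimi rA,rS,8,16,23" rotates rS left by 8, keeps only
 * the mask bits 16..23 (0x0000FF00) of the rotated value, and merges them
 * into rA, leaving all other rA bits untouched -- a common idiom for
 * inserting one byte into a word. */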

/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    uint32_t mb, me, sh;

    sh = SH(ctx->opcode);
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == (31 - sh))) {
        if (likely(sh == 0)) {
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
        } else {
            TCGv t0 = tcg_temp_new();
            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shli_tl(t0, t0, sh);
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
            tcg_temp_free(t0);
        }
    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_shri_tl(t0, t0, mb);
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free(t0);
    } else {
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t1, t1, sh);
        tcg_gen_extu_i32_i64(t0, t1);
        tcg_temp_free_i32(t1);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
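
/* Illustrative note: the two fast paths recognize the standard mnemonics
 * that assemble to rlwinm -- "slwi rA,rS,n" is rlwinm rA,rS,n,0,31-n and
 * "srwi rA,rS,n" is rlwinm rA,rS,32-n,n,31 -- and turn them into a single
 * shift instead of a rotate plus mask. */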

/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    uint32_t mb, me;
    TCGv t0;
#if defined(TARGET_PPC64)
    TCGv_i32 t1, t2;
#endif

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
    t1 = tcg_temp_new_i32();
    t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_trunc_i64_i32(t2, t0);
    tcg_gen_rotl_i32(t1, t1, t2);
    tcg_gen_extu_i32_i64(t0, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
#else
    tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
    if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
#define GEN_PPC64_R2(name, opc1, opc2) \
static void glue(gen_, name##0)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0); \
} \
 \
static void glue(gen_, name##1)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1); \
}
#define GEN_PPC64_R4(name, opc1, opc2) \
static void glue(gen_, name##0)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0, 0); \
} \
 \
static void glue(gen_, name##1)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 0, 1); \
} \
 \
static void glue(gen_, name##2)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1, 0); \
} \
 \
static void glue(gen_, name##3)(DisasContext *ctx) \
{ \
    gen_##name(ctx, 1, 1); \
}

static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
                              uint32_t sh)
{
    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        if (likely(mb == 0 && me == 63)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        } else {
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);

static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
{
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    if (unlikely(mb != 0 || me != 63)) {
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb, me;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    me = 63 - sh;
    if (unlikely(sh == 0 && mb == 0)) {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        TCGv t0, t1;
        target_ulong mask;

        t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        t1 = tcg_temp_new();
        mask = MASK(mb, me);
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif

/*** Integer shift ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
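
/* Illustrative note: the shli/sari pair moves bit 5 of the shift amount
 * into the sign position and arithmetic-shifts it back, producing either
 * all-ones (rB & 0x20 set) or all-zeros.  The andc with that mask zeroes
 * rS when the shift amount is 32..63, matching the architected slw result
 * of 0, while the actual shift uses only the low five bits of rB. */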

/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    if (sh != 0) {
        int l1, l2;
        TCGv t0;
        l1 = gen_new_label();
        l2 = gen_new_label();
        t0 = tcg_temp_local_new();
        tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        tcg_gen_br(l2);
        gen_set_label(l1);
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
        gen_set_label(l2);
        tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], t0, sh);
        tcg_temp_free(t0);
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
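
/* Illustrative note: for srawi, XER[CA] is set only when the source is
 * negative and at least one 1 bit is shifted out
 * ((rS & ((1 << sh) - 1)) != 0), i.e. when the arithmetic shift rounded
 * towards minus infinity.  E.g. "srawi r3,r4,1" with r4 = -3 yields -2
 * with CA = 1, whereas r4 = -4 yields -2 with CA = 0. */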

/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    if (sh != 0) {
        int l1, l2;
        TCGv t0;
        l1 = gen_new_label();
        l2 = gen_new_label();
        t0 = tcg_temp_local_new();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
        tcg_gen_br(l2);
        gen_set_label(l1);
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
        gen_set_label(l2);
        tcg_temp_free(t0);
        tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}

/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif

/*** Floating-Point arithmetic ***/
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
1991 /* NIP cannot be restored if the memory exception comes from a helper */ \
1992 gen_update_nip(ctx, ctx->nip - 4); \
1993 gen_reset_fpstatus(); \
1994 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
1995 cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
1996 if (isfloat) { \
1997 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
1998 } \
1999 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
2000 Rc(ctx->opcode) != 0); \
2001 }
2002
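/* Each GEN_FLOAT_* wrapper below instantiates a double-precision form and a
 * single-precision form; the latter passes isfloat = 1 so the double result
 * is rounded via gen_helper_frsp(). E.g. GEN_FLOAT_ACB(madd, ...) expands
 * to both gen_fmadd() and gen_fmadds(). */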
2003 #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
2004 _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
2005 _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
2006
2007 #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2008 static void gen_f##name(DisasContext *ctx) \
2009 { \
2010 if (unlikely(!ctx->fpu_enabled)) { \
2011 gen_exception(ctx, POWERPC_EXCP_FPU); \
2012 return; \
2013 } \
2014 /* NIP cannot be restored if the memory exception comes from a helper */ \
2015 gen_update_nip(ctx, ctx->nip - 4); \
2016 gen_reset_fpstatus(); \
2017 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
2018 cpu_fpr[rB(ctx->opcode)]); \
2019 if (isfloat) { \
2020 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
2021 } \
2022 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2023 set_fprf, Rc(ctx->opcode) != 0); \
2024 }
2025 #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2026 _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2027 _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2028
2029 #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2030 static void gen_f##name(DisasContext *ctx) \
2031 { \
2032 if (unlikely(!ctx->fpu_enabled)) { \
2033 gen_exception(ctx, POWERPC_EXCP_FPU); \
2034 return; \
2035 } \
2036 /* NIP cannot be restored if the memory exception comes from a helper */ \
2037 gen_update_nip(ctx, ctx->nip - 4); \
2038 gen_reset_fpstatus(); \
2039 gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
2040 cpu_fpr[rC(ctx->opcode)]); \
2041 if (isfloat) { \
2042 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
2043 } \
2044 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2045 set_fprf, Rc(ctx->opcode) != 0); \
2046 }
2047 #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2048 _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2049 _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2050
2051 #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2052 static void gen_f##name(DisasContext *ctx) \
2053 { \
2054 if (unlikely(!ctx->fpu_enabled)) { \
2055 gen_exception(ctx, POWERPC_EXCP_FPU); \
2056 return; \
2057 } \
2058 /* NIP cannot be restored if the memory exception comes from a helper */ \
2059 gen_update_nip(ctx, ctx->nip - 4); \
2060 gen_reset_fpstatus(); \
2061 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2062 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2063 set_fprf, Rc(ctx->opcode) != 0); \
2064 }
2065
2066 #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2067 static void gen_f##name(DisasContext *ctx) \
2068 { \
2069 if (unlikely(!ctx->fpu_enabled)) { \
2070 gen_exception(ctx, POWERPC_EXCP_FPU); \
2071 return; \
2072 } \
2073 /* NIP cannot be restored if the memory exception comes from a helper */ \
2074 gen_update_nip(ctx, ctx->nip - 4); \
2075 gen_reset_fpstatus(); \
2076 gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
2077 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
2078 set_fprf, Rc(ctx->opcode) != 0); \
2079 }
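/* Note: GEN_FLOAT_B and GEN_FLOAT_BS expand to identical bodies; the extra
 * opcode parameters only matter where the generated functions are entered
 * into the opcode tables. */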
2080
2081 /* fadd - fadds */
2082 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
2083 /* fdiv - fdivs */
2084 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
2085 /* fmul - fmuls */
2086 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
2087
2088 /* fre */
2089 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
2090
2091 /* fres */
2092 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
2093
2094 /* frsqrte */
2095 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2096
2097 /* frsqrtes */
2098 static void gen_frsqrtes(DisasContext *ctx)
2099 {
2100 if (unlikely(!ctx->fpu_enabled)) {
2101 gen_exception(ctx, POWERPC_EXCP_FPU);
2102 return;
2103 }
2104 /* NIP cannot be restored if the memory exception comes from a helper */
2105 gen_update_nip(ctx, ctx->nip - 4);
2106 gen_reset_fpstatus();
2107 gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2108 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
2109 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2110 }
2111
2112 /* fsel */
2113 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
2114 /* fsub - fsubs */
2115 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
2116 /* Optional: */
2117
2118 /* fsqrt */
2119 static void gen_fsqrt(DisasContext *ctx)
2120 {
2121 if (unlikely(!ctx->fpu_enabled)) {
2122 gen_exception(ctx, POWERPC_EXCP_FPU);
2123 return;
2124 }
2125 /* NIP cannot be restored if the memory exception comes from a helper */
2126 gen_update_nip(ctx, ctx->nip - 4);
2127 gen_reset_fpstatus();
2128 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2129 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2130 }
2131
2132 static void gen_fsqrts(DisasContext *ctx)
2133 {
2134 if (unlikely(!ctx->fpu_enabled)) {
2135 gen_exception(ctx, POWERPC_EXCP_FPU);
2136 return;
2137 }
2138 /* NIP cannot be restored if the memory exception comes from a helper */
2139 gen_update_nip(ctx, ctx->nip - 4);
2140 gen_reset_fpstatus();
2141 gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2142 gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
2143 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
2144 }
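/* fsqrts and frsqrtes are open-coded rather than macro-generated: both
 * reuse the double-precision helper and then round the result to single
 * precision with gen_helper_frsp(). */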
2145
2146 /*** Floating-Point multiply-and-add ***/
2147 /* fmadd - fmadds */
2148 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
2149 /* fmsub - fmsubs */
2150 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
2151 /* fnmadd - fnmadds */
2152 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
2153 /* fnmsub - fnmsubs */
2154 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
2155
2156 /*** Floating-Point round & convert ***/
2157 /* fctiw */
2158 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
2159 /* fctiwz */
2160 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
2161 /* frsp */
2162 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
2163 #if defined(TARGET_PPC64)
2164 /* fcfid */
2165 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
2166 /* fctid */
2167 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
2168 /* fctidz */
2169 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
2170 #endif
2171
2172 /* frin */
2173 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
2174 /* friz */
2175 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
2176 /* frip */
2177 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
2178 /* frim */
2179 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2180
2181 /*** Floating-Point compare ***/
2182
2183 /* fcmpo */
2184 static void gen_fcmpo(DisasContext *ctx)
2185 {
2186 TCGv_i32 crf;
2187 if (unlikely(!ctx->fpu_enabled)) {
2188 gen_exception(ctx, POWERPC_EXCP_FPU);
2189 return;
2190 }
2191 /* NIP cannot be restored if the memory exception comes from a helper */
2192 gen_update_nip(ctx, ctx->nip - 4);
2193 gen_reset_fpstatus();
2194 crf = tcg_const_i32(crfD(ctx->opcode));
2195 gen_helper_fcmpo(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
2196 tcg_temp_free_i32(crf);
2197 gen_helper_float_check_status();
2198 }
2199
2200 /* fcmpu */
2201 static void gen_fcmpu(DisasContext *ctx)
2202 {
2203 TCGv_i32 crf;
2204 if (unlikely(!ctx->fpu_enabled)) {
2205 gen_exception(ctx, POWERPC_EXCP_FPU);
2206 return;
2207 }
2208 /* NIP cannot be restored if the memory exception comes from a helper */
2209 gen_update_nip(ctx, ctx->nip - 4);
2210 gen_reset_fpstatus();
2211 crf = tcg_const_i32(crfD(ctx->opcode));
2212 gen_helper_fcmpu(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
2213 tcg_temp_free_i32(crf);
2214 gen_helper_float_check_status();
2215 }
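/* fcmpo and fcmpu differ only in the helper called: the ordered compare
 * (fcmpo) additionally raises the VXVC invalid-operation exception when an
 * operand is a NaN. */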
2216
2217 /*** Floating-point move ***/
2218 /* fabs */
2219 /* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
2220 GEN_FLOAT_B(abs, 0x08, 0x08, 0, PPC_FLOAT);
2221
2222 /* fmr - fmr. */
2223 /* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
2224 static void gen_fmr(DisasContext *ctx)
2225 {
2226 if (unlikely(!ctx->fpu_enabled)) {
2227 gen_exception(ctx, POWERPC_EXCP_FPU);
2228 return;
2229 }
2230 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2231 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2232 }
2233
2234 /* fnabs */
2235 /* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
2236 GEN_FLOAT_B(nabs, 0x08, 0x04, 0, PPC_FLOAT);
2237 /* fneg */
2238 /* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
2239 GEN_FLOAT_B(neg, 0x08, 0x01, 0, PPC_FLOAT);
2240
2241 /*** Floating-Point status & ctrl register ***/
2242
2243 /* mcrfs */
2244 static void gen_mcrfs(DisasContext *ctx)
2245 {
2246 int bfa;
2247
2248 if (unlikely(!ctx->fpu_enabled)) {
2249 gen_exception(ctx, POWERPC_EXCP_FPU);
2250 return;
2251 }
2252 bfa = 4 * (7 - crfS(ctx->opcode));
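/* bfa is the bit offset of the selected 4-bit FPSCR field, with fields
 * numbered from the MSB end; e.g. crfS = 0 gives bfa = 28 and selects
 * FPSCR[FX, FEX, VX, OX], which is copied to crfD and then cleared. */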
2253 tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
2254 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2255 tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
2256 }
2257
2258 /* mffs */
2259 static void gen_mffs(DisasContext *ctx)
2260 {
2261 if (unlikely(!ctx->fpu_enabled)) {
2262 gen_exception(ctx, POWERPC_EXCP_FPU);
2263 return;
2264 }
2265 gen_reset_fpstatus();
2266 tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
2267 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2268 }
2269
2270 /* mtfsb0 */
2271 static void gen_mtfsb0(DisasContext *ctx)
2272 {
2273 uint8_t crb;
2274
2275 if (unlikely(!ctx->fpu_enabled)) {
2276 gen_exception(ctx, POWERPC_EXCP_FPU);
2277 return;
2278 }
2279 crb = 31 - crbD(ctx->opcode);
2280 gen_reset_fpstatus();
2281 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
2282 TCGv_i32 t0;
2283 /* NIP cannot be restored if the memory exception comes from a helper */
2284 gen_update_nip(ctx, ctx->nip - 4);
2285 t0 = tcg_const_i32(crb);
2286 gen_helper_fpscr_clrbit(t0);
2287 tcg_temp_free_i32(t0);
2288 }
2289 if (unlikely(Rc(ctx->opcode) != 0)) {
2290 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2291 }
2292 }
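/* FPSCR[FEX] and FPSCR[VX] are summary bits derived from the other
 * exception bits, which is why mtfsb0 and mtfsb1 silently skip them. */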
2293
2294 /* mtfsb1 */
2295 static void gen_mtfsb1(DisasContext *ctx)
2296 {
2297 uint8_t crb;
2298
2299 if (unlikely(!ctx->fpu_enabled)) {
2300 gen_exception(ctx, POWERPC_EXCP_FPU);
2301 return;
2302 }
2303 crb = 31 - crbD(ctx->opcode);
2304 gen_reset_fpstatus();
2305 /* XXX: we pretend we can only do IEEE floating-point computations */
2306 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
2307 TCGv_i32 t0;
2308 /* NIP cannot be restored if the memory exception comes from a helper */
2309 gen_update_nip(ctx, ctx->nip - 4);
2310 t0 = tcg_const_i32(crb);
2311 gen_helper_fpscr_setbit(t0);
2312 tcg_temp_free_i32(t0);
2313 }
2314 if (unlikely(Rc(ctx->opcode) != 0)) {
2315 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2316 }
2317 /* We can raise a deferred exception */
2318 gen_helper_float_check_status();
2319 }
2320
2321 /* mtfsf */
2322 static void gen_mtfsf(DisasContext *ctx)
2323 {
2324 TCGv_i32 t0;
2325 int L = ctx->opcode & 0x02000000;
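/* When L is set, FM is ignored and the whole FPSCR is written (mask 0xff) */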
2326
2327 if (unlikely(!ctx->fpu_enabled)) {
2328 gen_exception(ctx, POWERPC_EXCP_FPU);
2329 return;
2330 }
2331 /* NIP cannot be restored if the memory exception comes from a helper */
2332 gen_update_nip(ctx, ctx->nip - 4);
2333 gen_reset_fpstatus();
2334 if (L)
2335 t0 = tcg_const_i32(0xff);
2336 else
2337 t0 = tcg_const_i32(FM(ctx->opcode));
2338 gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
2339 tcg_temp_free_i32(t0);
2340 if (unlikely(Rc(ctx->opcode) != 0)) {
2341 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2342 }
2343 /* We can raise a deferred exception */
2344 gen_helper_float_check_status();
2345 }
2346
2347 /* mtfsfi */
2348 static void gen_mtfsfi(DisasContext *ctx)
2349 {
2350 int bf, sh;
2351 TCGv_i64 t0;
2352 TCGv_i32 t1;
2353
2354 if (unlikely(!ctx->fpu_enabled)) {
2355 gen_exception(ctx, POWERPC_EXCP_FPU);
2356 return;
2357 }
2358 bf = crbD(ctx->opcode) >> 2;
2359 sh = 7 - bf;
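/* The 4-bit immediate is positioned at field bf; e.g. bf = 0 gives sh = 7,
 * placing FPIMM in FPSCR bits 31:28 with the field mask 1 << 7. */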
2360 /* NIP cannot be restored if the memory exception comes from a helper */
2361 gen_update_nip(ctx, ctx->nip - 4);
2362 gen_reset_fpstatus();
2363 t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
2364 t1 = tcg_const_i32(1 << sh);
2365 gen_helper_store_fpscr(t0, t1);
2366 tcg_temp_free_i64(t0);
2367 tcg_temp_free_i32(t1);
2368 if (unlikely(Rc(ctx->opcode) != 0)) {
2369 tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
2370 }
2371 /* We can raise a deferred exception */
2372 gen_helper_float_check_status();
2373 }
2374
2375 /*** Addressing modes ***/
2376 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2377 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2378 target_long maskl)
2379 {
2380 target_long simm = SIMM(ctx->opcode);
2381
2382 simm &= ~maskl;
2383 if (rA(ctx->opcode) == 0) {
2384 #if defined(TARGET_PPC64)
2385 if (!ctx->sf_mode) {
2386 tcg_gen_movi_tl(EA, (uint32_t)simm);
2387 } else
2388 #endif
2389 tcg_gen_movi_tl(EA, simm);
2390 } else if (likely(simm != 0)) {
2391 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2392 #if defined(TARGET_PPC64)
2393 if (!ctx->sf_mode) {
2394 tcg_gen_ext32u_tl(EA, EA);
2395 }
2396 #endif
2397 } else {
2398 #if defined(TARGET_PPC64)
2399 if (!ctx->sf_mode) {
2400 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2401 } else
2402 #endif
2403 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2404 }
2405 }
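/* In 32-bit mode on a 64-bit CPU the effective address is truncated to
 * 32 bits on every path above, as the architecture requires. maskl clears
 * the low immediate bits that DS/DQ instruction forms reuse as opcode
 * bits (e.g. 0x03 for ld/std). */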
2406
2407 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2408 {
2409 if (rA(ctx->opcode) == 0) {
2410 #if defined(TARGET_PPC64)
2411 if (!ctx->sf_mode) {
2412 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2413 } else
2414 #endif
2415 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2416 } else {
2417 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2418 #if defined(TARGET_PPC64)
2419 if (!ctx->sf_mode) {
2420 tcg_gen_ext32u_tl(EA, EA);
2421 }
2422 #endif
2423 }
2424 }
2425
2426 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2427 {
2428 if (rA(ctx->opcode) == 0) {
2429 tcg_gen_movi_tl(EA, 0);
2430 } else {
2431 #if defined(TARGET_PPC64)
2432 if (!ctx->sf_mode) {
2433 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2434 } else
2435 #endif
2436 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2437 }
2438 }
2439
2440 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2441 target_long val)
2442 {
2443 tcg_gen_addi_tl(ret, arg1, val);
2444 #if defined(TARGET_PPC64)
2445 if (!ctx->sf_mode) {
2446 tcg_gen_ext32u_tl(ret, ret);
2447 }
2448 #endif
2449 }
2450
2451 static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
2452 {
2453 int l1 = gen_new_label();
2454 TCGv t0 = tcg_temp_new();
2455 TCGv_i32 t1, t2;
2456 /* NIP cannot be restored if the memory exception comes from a helper */
2457 gen_update_nip(ctx, ctx->nip - 4);
2458 tcg_gen_andi_tl(t0, EA, mask);
2459 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
2460 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
2461 t2 = tcg_const_i32(0);
2462 gen_helper_raise_exception_err(t1, t2);
2463 tcg_temp_free_i32(t1);
2464 tcg_temp_free_i32(t2);
2465 gen_set_label(l1);
2466 tcg_temp_free(t0);
2467 }
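/* Note: callers pass EAs created with tcg_temp_local_new(), since the
 * conditional branch emitted here ends a TCG basic block and ordinary
 * temporaries do not keep their value across branches. */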
2468
2469 /*** Integer load ***/
2470 static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2471 {
2472 tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
2473 }
2474
2475 static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2476 {
2477 tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
2478 }
2479
2480 static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2481 {
2482 tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
2483 if (unlikely(ctx->le_mode)) {
2484 tcg_gen_bswap16_tl(arg1, arg1);
2485 }
2486 }
2487
2488 static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2489 {
2490 if (unlikely(ctx->le_mode)) {
2491 tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
2492 tcg_gen_bswap16_tl(arg1, arg1);
2493 tcg_gen_ext16s_tl(arg1, arg1);
2494 } else {
2495 tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
2496 }
2497 }
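/* For the sign-extending variants in little-endian mode, the value must be
 * loaded zero-extended, byte-swapped, and only then sign-extended: a
 * sign-extending load would extend from the wrong (pre-swap) byte. */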
2498
2499 static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
2500 {
2501 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2502 if (unlikely(ctx->le_mode)) {
2503 tcg_gen_bswap32_tl(arg1, arg1);
2504 }
2505 }
2506
2507 #if defined(TARGET_PPC64)
2508 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2509 {
2510 if (unlikely(ctx->le_mode)) {
2511 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2512 tcg_gen_bswap32_tl(arg1, arg1);
2513 tcg_gen_ext32s_tl(arg1, arg1);
2514 } else
2515 tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
2516 }
2517 #endif
2518
2519 static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2520 {
2521 tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
2522 if (unlikely(ctx->le_mode)) {
2523 tcg_gen_bswap64_i64(arg1, arg1);
2524 }
2525 }
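/* Loads byte-swap the result in place after the access; the stores below
 * instead swap a copy of the source before storing, leaving arg1 intact. */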
2526
2527 static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
2528 {
2529 tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
2530 }
2531
2532 static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
2533 {
2534 if (unlikely(ctx->le_mode)) {
2535 TCGv t0 = tcg_temp_new();
2536 tcg_gen_ext16u_tl(t0, arg1);
2537 tcg_gen_bswap16_tl(t0, t0);
2538 tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
2539 tcg_temp_free(t0);
2540 } else {
2541 tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
2542 }
2543 }
2544
2545 static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
2546 {
2547 if (unlikely(ctx->le_mode)) {
2548 TCGv t0 = tcg_temp_new();
2549 tcg_gen_ext32u_tl(t0, arg1);
2550 tcg_gen_bswap32_tl(t0, t0);
2551 tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
2552 tcg_temp_free(t0);
2553 } else {
2554 tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
2555 }
2556 }
2557
2558 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2559 {
2560 if (unlikely(ctx->le_mode)) {
2561 TCGv_i64 t0 = tcg_temp_new_i64();
2562 tcg_gen_bswap64_i64(t0, arg1);
2563 tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
2564 tcg_temp_free_i64(t0);
2565 } else
2566 tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
2567 }
2568
2569 #define GEN_LD(name, ldop, opc, type) \
2570 static void glue(gen_, name)(DisasContext *ctx) \
2571 { \
2572 TCGv EA; \
2573 gen_set_access_type(ctx, ACCESS_INT); \
2574 EA = tcg_temp_new(); \
2575 gen_addr_imm_index(ctx, EA, 0); \
2576 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2577 tcg_temp_free(EA); \
2578 }
2579
2580 #define GEN_LDU(name, ldop, opc, type) \
2581 static void glue(gen_, name##u)(DisasContext *ctx) \
2582 { \
2583 TCGv EA; \
2584 if (unlikely(rA(ctx->opcode) == 0 || \
2585 rA(ctx->opcode) == rD(ctx->opcode))) { \
2586 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2587 return; \
2588 } \
2589 gen_set_access_type(ctx, ACCESS_INT); \
2590 EA = tcg_temp_new(); \
2591 if (type == PPC_64B) \
2592 gen_addr_imm_index(ctx, EA, 0x03); \
2593 else \
2594 gen_addr_imm_index(ctx, EA, 0); \
2595 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2596 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2597 tcg_temp_free(EA); \
2598 }
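/* Update forms write the EA back into rA, so rA = 0 (no base register) and
 * rA = rD (the load would clobber the base) are invalid instruction forms. */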
2599
2600 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2601 static void glue(gen_, name##ux)(DisasContext *ctx) \
2602 { \
2603 TCGv EA; \
2604 if (unlikely(rA(ctx->opcode) == 0 || \
2605 rA(ctx->opcode) == rD(ctx->opcode))) { \
2606 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2607 return; \
2608 } \
2609 gen_set_access_type(ctx, ACCESS_INT); \
2610 EA = tcg_temp_new(); \
2611 gen_addr_reg_index(ctx, EA); \
2612 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2613 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2614 tcg_temp_free(EA); \
2615 }
2616
2617 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2618 static void glue(gen_, name##x)(DisasContext *ctx) \
2619 { \
2620 TCGv EA; \
2621 gen_set_access_type(ctx, ACCESS_INT); \
2622 EA = tcg_temp_new(); \
2623 gen_addr_reg_index(ctx, EA); \
2624 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2625 tcg_temp_free(EA); \
2626 }
2627
2628 #define GEN_LDS(name, ldop, op, type) \
2629 GEN_LD(name, ldop, op | 0x20, type); \
2630 GEN_LDU(name, ldop, op | 0x21, type); \
2631 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2632 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
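/* e.g. GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) below expands to gen_lbz,
 * gen_lbzu, gen_lbzux and gen_lbzx, one per addressing form. */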
2633
2634 /* lbz lbzu lbzux lbzx */
2635 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2636 /* lha lhau lhaux lhax */
2637 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2638 /* lhz lhzu lhzux lhzx */
2639 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2640 /* lwz lwzu lwzux lwzx */
2641 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2642 #if defined(TARGET_PPC64)
2643 /* lwaux */
2644 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2645 /* lwax */
2646 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2647 /* ldux */
2648 GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
2649 /* ldx */
2650 GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2651
2652 static void gen_ld(DisasContext *ctx)
2653 {
2654 TCGv EA;
2655 if (Rc(ctx->opcode)) {
2656 if (unlikely(rA(ctx->opcode) == 0 ||
2657 rA(ctx->opcode) == rD(ctx->opcode))) {
2658 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2659 return;
2660 }
2661 }
2662 gen_set_access_type(ctx, ACCESS_INT);
2663 EA = tcg_temp_new();
2664 gen_addr_imm_index(ctx, EA, 0x03);
2665 if (ctx->opcode & 0x02) {
2666 /* lwa (lwau is undefined) */
2667 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2668 } else {
2669 /* ld - ldu */
2670 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2671 }
2672 if (Rc(ctx->opcode))
2673 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2674 tcg_temp_free(EA);
2675 }
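/* ld, ldu and lwa share the DS instruction form: the two low opcode bits
 * (masked off the displacement via maskl = 0x03) select the operation,
 * bit 1 picking lwa and bit 0 the update form. */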
2676
2677 /* lq */
2678 static void gen_lq(DisasContext *ctx)
2679 {
2680 #if defined(CONFIG_USER_ONLY)
2681 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2682 #else
2683 int ra, rd;
2684 TCGv EA;
2685
2686 /* lq is a privileged instruction: fault in user mode */
2687 if (unlikely(ctx->mem_idx == 0)) {
2688 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2689 return;
2690 }
2691 ra = rA(ctx->opcode);
2692 rd = rD(ctx->opcode);
2693 if (unlikely((rd & 1) || rd == ra)) {
2694 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2695 return;
2696 }
2697 if (unlikely(ctx->le_mode)) {
2698 /* Little-endian mode is not handled */
2699 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2700 return;
2701 }
2702 gen_set_access_type(ctx, ACCESS_INT);
2703 EA = tcg_temp_new();
2704 gen_addr_imm_index(ctx, EA, 0x0F);
2705 gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
2706 gen_addr_add(ctx, EA, EA, 8);
2707 gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
2708 tcg_temp_free(EA);
2709 #endif
2710 }
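/* lq loads an even/odd GPR pair; the DQ-form displacement has its low four
 * bits cleared (maskl = 0x0F), and rd must be even and different from ra. */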
2711 #endif
2712
2713 /*** Integer store ***/
2714 #define GEN_ST(name, stop, opc, type) \
2715 static void glue(gen_, name)(DisasContext *ctx) \
2716 { \
2717 TCGv EA; \
2718 gen_set_access_type(ctx, ACCESS_INT); \
2719 EA = tcg_temp_new(); \
2720 gen_addr_imm_index(ctx, EA, 0); \
2721 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2722 tcg_temp_free(EA); \
2723 }
2724
2725 #define GEN_STU(name, stop, opc, type) \
2726 static void glue(gen_, stop##u)(DisasContext *ctx) \
2727 { \
2728 TCGv EA; \
2729 if (unlikely(rA(ctx->opcode) == 0)) { \
2730 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2731 return; \
2732 } \
2733 gen_set_access_type(ctx, ACCESS_INT); \
2734 EA = tcg_temp_new(); \
2735 if (type == PPC_64B) \
2736 gen_addr_imm_index(ctx, EA, 0x03); \
2737 else \
2738 gen_addr_imm_index(ctx, EA, 0); \
2739 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2740 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2741 tcg_temp_free(EA); \
2742 }
2743
2744 #define GEN_STUX(name, stop, opc2, opc3, type) \
2745 static void glue(gen_, name##ux)(DisasContext *ctx) \
2746 { \
2747 TCGv EA; \
2748 if (unlikely(rA(ctx->opcode) == 0)) { \
2749 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2750 return; \
2751 } \
2752 gen_set_access_type(ctx, ACCESS_INT); \
2753 EA = tcg_temp_new(); \
2754 gen_addr_reg_index(ctx, EA); \
2755 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2756 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2757 tcg_temp_free(EA); \
2758 }
2759
2760 #define GEN_STX(name, stop, opc2, opc3, type) \
2761 static void glue(gen_, name##x)(DisasContext *ctx) \
2762 { \
2763 TCGv EA; \
2764 gen_set_access_type(ctx, ACCESS_INT); \
2765 EA = tcg_temp_new(); \
2766 gen_addr_reg_index(ctx, EA); \
2767 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2768 tcg_temp_free(EA); \
2769 }
2770
2771 #define GEN_STS(name, stop, op, type) \
2772 GEN_ST(name, stop, op | 0x20, type); \
2773 GEN_STU(name, stop, op | 0x21, type); \
2774 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
2775 GEN_STX(name, stop, 0x17, op | 0x00, type)
2776
2777 /* stb stbu stbux stbx */
2778 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
2779 /* sth sthu sthux sthx */
2780 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
2781 /* stw stwu stwux stwx */
2782 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
2783 #if defined(TARGET_PPC64)
2784 GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
2785 GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
2786
2787 static void gen_std(DisasContext *ctx)
2788 {
2789 int rs;
2790 TCGv EA;
2791
2792 rs = rS(ctx->opcode);
2793 if ((ctx->opcode & 0x3) == 0x2) {
2794 #if defined(CONFIG_USER_ONLY)
2795 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2796 #else
2797 /* stq */
2798 if (unlikely(ctx->mem_idx == 0)) {
2799 gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2800 return;
2801 }
2802 if (unlikely(rs & 1)) {
2803 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2804 return;
2805 }
2806 if (unlikely(ctx->le_mode)) {
2807 /* Little-endian mode is not handled */
2808 gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
2809 return;
2810 }
2811 gen_set_access_type(ctx, ACCESS_INT);
2812 EA = tcg_temp_new();
2813 gen_addr_imm_index(ctx, EA, 0x03);
2814 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
2815 gen_addr_add(ctx, EA, EA, 8);
2816 gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
2817 tcg_temp_free(EA);
2818 #endif
2819 } else {
2820 /* std / stdu */
2821 if (Rc(ctx->opcode)) {
2822 if (unlikely(rA(ctx->opcode) == 0)) {
2823 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2824 return;
2825 }
2826 }
2827 gen_set_access_type(ctx, ACCESS_INT);
2828 EA = tcg_temp_new();
2829 gen_addr_imm_index(ctx, EA, 0x03);
2830 gen_qemu_st64(ctx, cpu_gpr[rs], EA);
2831 if (Rc(ctx->opcode))
2832 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2833 tcg_temp_free(EA);
2834 }
2835 }
2836 #endif
2837 /*** Integer load and store with byte reverse ***/
2838 /* lhbrx */
2839 static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
2840 {
2841 tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
2842 if (likely(!ctx->le_mode)) {
2843 tcg_gen_bswap16_tl(arg1, arg1);
2844 }
2845 }
2846 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
2847
2848 /* lwbrx */
2849 static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
2850 {
2851 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2852 if (likely(!ctx->le_mode)) {
2853 tcg_gen_bswap32_tl(arg1, arg1);
2854 }
2855 }
2856 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
2857
2858 /* sthbrx */
2859 static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
2860 {
2861 if (likely(!ctx->le_mode)) {
2862 TCGv t0 = tcg_temp_new();
2863 tcg_gen_ext16u_tl(t0, arg1);
2864 tcg_gen_bswap16_tl(t0, t0);
2865 tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
2866 tcg_temp_free(t0);
2867 } else {
2868 tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
2869 }
2870 }
2871 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
2872
2873 /* stwbrx */
2874 static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
2875 {
2876 if (likely(!ctx->le_mode)) {
2877 TCGv t0 = tcg_temp_new();
2878 tcg_gen_ext32u_tl(t0, arg1);
2879 tcg_gen_bswap32_tl(t0, t0);
2880 tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
2881 tcg_temp_free(t0);
2882 } else {
2883 tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
2884 }
2885 }
2886 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
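/* The byte-reverse accessors swap exactly when the normal accessors above
 * do not, i.e. they always access the opposite byte order from the current
 * endianness mode. */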
2887
2888 /*** Integer load and store multiple ***/
2889
2890 /* lmw */
2891 static void gen_lmw(DisasContext *ctx)
2892 {
2893 TCGv t0;
2894 TCGv_i32 t1;
2895 gen_set_access_type(ctx, ACCESS_INT);
2896 /* NIP cannot be restored if the memory exception comes from a helper */
2897 gen_update_nip(ctx, ctx->nip - 4);
2898 t0 = tcg_temp_new();
2899 t1 = tcg_const_i32(rD(ctx->opcode));
2900 gen_addr_imm_index(ctx, t0, 0);
2901 gen_helper_lmw(t0, t1);
2902 tcg_temp_free(t0);
2903 tcg_temp_free_i32(t1);
2904 }
2905
2906 /* stmw */
2907 static void gen_stmw(DisasContext *ctx)
2908 {
2909 TCGv t0;
2910 TCGv_i32 t1;
2911 gen_set_access_type(ctx, ACCESS_INT);
2912 /* NIP cannot be restored if the memory exception comes from a helper */
2913 gen_update_nip(ctx, ctx->nip - 4);
2914 t0 = tcg_temp_new();
2915 t1 = tcg_const_i32(rS(ctx->opcode));
2916 gen_addr_imm_index(ctx, t0, 0);
2917 gen_helper_stmw(t0, t1);
2918 tcg_temp_free(t0);
2919 tcg_temp_free_i32(t1);
2920 }
2921
2922 /*** Integer load and store strings ***/
2923
2924 /* lswi */
2925 /* The PowerPC32 specification says we must generate an exception if
2926  * rA is in the range of registers to be loaded.
2927  * On the other hand, IBM says this is valid, but rA won't be loaded.
2928 * For now, I'll follow the spec...
2929 */
2930 static void gen_lswi(DisasContext *ctx)
2931 {
2932 TCGv t0;
2933 TCGv_i32 t1, t2;
2934 int nb = NB(ctx->opcode);
2935 int start = rD(ctx->opcode);
2936 int ra = rA(ctx->opcode);
2937 int nr;
2938
2939 if (nb == 0)
2940 nb = 32;
2941 nr = nb / 4;
2942 if (unlikely(((start + nr) > 32 &&
2943 start <= ra && (start + nr - 32) > ra) ||
2944 ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
2945 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
2946 return;
2947 }
2948 gen_set_access_type(ctx, ACCESS_INT);
2949 /* NIP cannot be restored if the memory exception comes from a helper */
2950 gen_update_nip(ctx, ctx->nip - 4);
2951 t0 = tcg_temp_new();
2952 gen_addr_register(ctx, t0);
2953 t1 = tcg_const_i32(nb);
2954 t2 = tcg_const_i32(start);
2955 gen_helper_lsw(t0, t1, t2);
2956 tcg_temp_free(t0);
2957 tcg_temp_free_i32(t1);
2958 tcg_temp_free_i32(t2);
2959 }
2960
2961 /* lswx */
2962 static void gen_lswx(DisasContext *ctx)
2963 {
2964 TCGv t0;
2965 TCGv_i32 t1, t2, t3;
2966 gen_set_access_type(ctx, ACCESS_INT);
2967 /* NIP cannot be restored if the memory exception comes from a helper */
2968 gen_update_nip(ctx, ctx->nip - 4);
2969 t0 = tcg_temp_new();
2970 gen_addr_reg_index(ctx, t0);
2971 t1 = tcg_const_i32(rD(ctx->opcode));
2972 t2 = tcg_const_i32(rA(ctx->opcode));
2973 t3 = tcg_const_i32(rB(ctx->opcode));
2974 gen_helper_lswx(t0, t1, t2, t3);
2975 tcg_temp_free(t0);
2976 tcg_temp_free_i32(t1);
2977 tcg_temp_free_i32(t2);
2978 tcg_temp_free_i32(t3);
2979 }
2980
2981 /* stswi */
2982 static void gen_stswi(DisasContext *ctx)
2983 {
2984 TCGv t0;
2985 TCGv_i32 t1, t2;
2986 int nb = NB(ctx->opcode);
2987 gen_set_access_type(ctx, ACCESS_INT);
2988 /* NIP cannot be restored if the memory exception comes from a helper */
2989 gen_update_nip(ctx, ctx->nip - 4);
2990 t0 = tcg_temp_new();
2991 gen_addr_register(ctx, t0);
2992 if (nb == 0)
2993 nb = 32;
2994 t1 = tcg_const_i32(nb);
2995 t2 = tcg_const_i32(rS(ctx->opcode));
2996 gen_helper_stsw(t0, t1, t2);
2997 tcg_temp_free(t0);
2998 tcg_temp_free_i32(t1);
2999 tcg_temp_free_i32(t2);
3000 }
3001
3002 /* stswx */
3003 static void gen_stswx(DisasContext *ctx)
3004 {
3005 TCGv t0;
3006 TCGv_i32 t1, t2;
3007 gen_set_access_type(ctx, ACCESS_INT);
3008 /* NIP cannot be restored if the memory exception comes from a helper */
3009 gen_update_nip(ctx, ctx->nip - 4);
3010 t0 = tcg_temp_new();
3011 gen_addr_reg_index(ctx, t0);
3012 t1 = tcg_temp_new_i32();
3013 tcg_gen_trunc_tl_i32(t1, cpu_xer);
3014 tcg_gen_andi_i32(t1, t1, 0x7F);
3015 t2 = tcg_const_i32(rS(ctx->opcode));
3016 gen_helper_stsw(t0, t1, t2);
3017 tcg_temp_free(t0);
3018 tcg_temp_free_i32(t1);
3019 tcg_temp_free_i32(t2);
3020 }
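/* stswx takes the byte count from the low 7 bits of XER (hence the 0x7F
 * mask above); stswi takes it from the NB opcode field instead. */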
3021
3022 /*** Memory synchronisation ***/
3023 /* eieio */
3024 static void gen_eieio(DisasContext *ctx)
3025 {
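/* nothing to generate: TCG executes memory accesses in program order
 * here, so no barrier code is needed */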
3026 }
3027
3028 /* isync */
3029 static void gen_isync(DisasContext *ctx)
3030 {
3031 gen_stop_exception(ctx);
3032 }
3033
3034 /* lwarx */
3035 static void gen_lwarx(DisasContext *ctx)
3036 {
3037 TCGv t0;
3038 TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3039 gen_set_access_type(ctx, ACCESS_RES);
3040 t0 = tcg_temp_local_new();
3041 gen_addr_reg_index(ctx, t0);
3042 gen_check_align(ctx, t0, 0x03);
3043 gen_qemu_ld32u(ctx, gpr, t0);
3044 tcg_gen_mov_tl(cpu_reserve, t0);
3045 tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
3046 tcg_temp_free(t0);
3047 }
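/* lwarx records the reservation address in cpu_reserve (and the loaded
 * value in reserve_val); stwcx. below only performs its store when the
 * addresses still match. */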
3048
3049 #if defined(CONFIG_USER_ONLY)
3050 static void gen_conditional_store (DisasContext *ctx, TCGv EA,
3051 int reg, int size)
3052 {
3053 TCGv t0 = tcg_temp_new();
3054 uint32_t save_exception = ctx->exception;
3055
3056 tcg_gen_st_tl(EA, cpu_env, offsetof(CPUState, reserve_ea));
3057 tcg_gen_movi_tl(t0, (size << 5) | reg);
3058 tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, reserve_info));
3059 tcg_temp_free(t0);
3060 gen_update_nip(ctx, ctx->nip-4);
3061 ctx->exception = POWERPC_EXCP_BRANCH;
3062 gen_exception(ctx, POWERPC_EXCP_STCX);
3063 ctx->exception = save_exception;
3064 }
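/* In user-only mode the reservation cannot be checked atomically from
 * generated code, so the EA and operand description are stashed in the CPU
 * state and a POWERPC_EXCP_STCX exception lets the execution loop finish
 * the store conditionally. */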
3065 #endif
3066
3067 /* stwcx. */
3068 static void gen_stwcx_(DisasContext *ctx)
3069 {
3070 TCGv t0;
3071 gen_set_access_type(ctx, ACCESS_RES);
3072 t0 = tcg_temp_local_new();
3073 gen_addr_reg_index(ctx, t0);
3074 gen_check_align(ctx, t0, 0x03);
3075 #if defined(CONFIG_USER_ONLY)
3076 gen_conditional_store(ctx, t0, rS(ctx->opcode), 4);
3077 #else
3078 {
3079 int l1;
3080
3081 tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
3082 tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
3083 tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
3084 l1 = gen_new_label();
3085 tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
3086 tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
3087 gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0);
3088 gen_set_label(l1);
3089 tcg_gen_movi_tl(cpu_reserve, -1);
3090 }
3091 #endif
3092 tcg_temp_free(t0);
3093 }
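/* CR0 ends up as LT = GT = 0, EQ = (store performed), SO copied from XER;
 * the reservation is cleared whether or not the store succeeded. */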
3094
3095 #if defined(TARGET_PPC64)
3096 /* ldarx */
3097 static void gen_ldarx(DisasContext *ctx)
3098 {
3099 TCGv t0;
3100 TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3101 gen_set_access_type(ctx, ACCESS_RES);
3102 t0 = tcg_temp_local_new();
3103 gen_addr_reg_index(ctx, t0);
3104 gen_check_align(ctx, t0, 0x07);
3105 gen_qemu_ld64(ctx, gpr, t0);
3106 tcg_gen_mov_tl(cpu_reserve, t0);
3107 tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUState, reserve_val));
3108 tcg_temp_free(t0);
3109 }
3110
3111 /* stdcx. */
3112 static void gen_stdcx_(DisasContext *ctx)
3113 {
3114 TCGv t0;
3115 gen_set_access_type(ctx, ACCESS_RES);
3116 t0 = tcg_temp_local_new();
3117 gen_addr_reg_index(ctx, t0);
3118 gen_check_align(ctx, t0, 0x07);
3119 #if defined(CONFIG_USER_ONLY)
3120 gen_conditional_store(ctx, t0, rS(ctx->opcode), 8);
3121 #else
3122 {
3123 int l1;