/*
 * trap signals for "-serial mon:stdio"
 * [qemu.git] / target-ppc / translate.c
 */
1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "qemu/host-utils.h"
25
26 #include "helper.h"
27 #define GEN_HELPER 1
28 #include "helper.h"
29
30 #define CPU_SINGLE_STEP 0x1
31 #define CPU_BRANCH_STEP 0x2
32 #define GDBSTUB_SINGLE_STEP 0x4
33
34 /* Include definitions for instructions classes and implementations flags */
35 //#define PPC_DEBUG_DISAS
36 //#define DO_PPC_STATISTICS
37
38 #ifdef PPC_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
43 /*****************************************************************************/
44 /* Code translation helpers */
45
46 /* global register indexes */
47 static TCGv_ptr cpu_env;
48 static char cpu_reg_names[10*3 + 22*4 /* GPR */
49 #if !defined(TARGET_PPC64)
50 + 10*4 + 22*5 /* SPE GPRh */
51 #endif
52 + 10*4 + 22*5 /* FPR */
53 + 2*(10*6 + 22*7) /* AVRh, AVRl */
54 + 8*5 /* CRF */];
55 static TCGv cpu_gpr[32];
56 #if !defined(TARGET_PPC64)
57 static TCGv cpu_gprh[32];
58 #endif
59 static TCGv_i64 cpu_fpr[32];
60 static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
61 static TCGv_i32 cpu_crf[8];
62 static TCGv cpu_nip;
63 static TCGv cpu_msr;
64 static TCGv cpu_ctr;
65 static TCGv cpu_lr;
66 #if defined(TARGET_PPC64)
67 static TCGv cpu_cfar;
68 #endif
69 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
70 static TCGv cpu_reserve;
71 static TCGv cpu_fpscr;
72 static TCGv_i32 cpu_access_type;
73
74 #include "exec/gen-icount.h"
75
void ppc_translate_init(void)
{
    int i;
    char* p;
    size_t cpu_reg_names_size;
    static int done_init = 0;

    /* TCG globals may only be registered once per process. */
    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* All register names are carved out of the static cpu_reg_names
       buffer; p and cpu_reg_names_size track the unused tail.  The
       per-name byte counts below must match the buffer-size arithmetic
       in the cpu_reg_names declaration. */
    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields: "crf0".."crf7", 5 bytes each incl. NUL. */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        /* GPRs: "r0".."r31" (3 or 4 bytes including NUL). */
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
#if !defined(TARGET_PPC64)
        /* SPE upper GPR halves exist only on 32-bit targets: "r0H".."r31H". */
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
#endif

        /* FPRs: "fp0".."fp31". */
        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        /* Altivec registers are handled as two 64-bit halves; which
           u64[] element holds the architecturally-high half depends on
           host endianness. */
        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
    }

    cpu_nip = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* SO/OV/CA are tracked in dedicated env fields alongside xer. */
    cpu_xer = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUPPCState, ca), "CA");

    /* lwarx/stwcx. reservation address. */
    cpu_reserve = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");

    cpu_fpscr = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUPPCState, access_type), "access_type");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
184
185 /* internal defines */
/* Per-translation-block decoder state, threaded through every gen_* fn. */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong nip;          /* address of the instruction being decoded */
    uint32_t opcode;           /* raw instruction word */
    uint32_t exception;        /* pending exception, POWERPC_EXCP_NONE if none */
    /* Routine used to access memory */
    int mem_idx;
    int access_type;           /* cached copy of env->access_type (lazy sync) */
    /* Translation flags */
    int le_mode;               /* little-endian mode */
#if defined(TARGET_PPC64)
    int sf_mode;               /* 64-bit (sixty-four) mode; see NARROW_MODE */
    int has_cfar;
#endif
    int fpu_enabled;
    int altivec_enabled;
    int spe_enabled;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;      /* instruction-set feature bits (PPC_*) */
    uint64_t insns_flags2;
} DisasContext;
208
209 /* True when active word size < size of target_long. */
210 #ifdef TARGET_PPC64
211 # define NARROW_MODE(C) (!(C)->sf_mode)
212 #else
213 # define NARROW_MODE(C) 0
214 #endif
215
/* Decode-table entry: reserved-bit masks, feature gates, and the
   translation callback for one (sub)opcode. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};
234
/* Emit a call to clear the softfloat exception status before an FP op. */
static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}
239
/* Update FPSCR.FPRF for the FP result in arg.
   set_fprf != 0: full FPRF computation plus FP status check;
   set_rc != 0:   additionally copy the flags into CR field 1 (Rc=1 form). */
static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    if (set_fprf != 0) {
        /* This case might be optimized later */
        /* t0 doubles as the set_fprf flag argument to the helper. */
        tcg_gen_movi_i32(t0, 1);
        gen_helper_compute_fprf(t0, cpu_env, arg, t0);
        if (unlikely(set_rc)) {
            tcg_gen_mov_i32(cpu_crf[1], t0);
        }
        gen_helper_float_check_status(cpu_env);
    } else if (unlikely(set_rc)) {
        /* We always need to compute fpcc */
        tcg_gen_movi_i32(t0, 0);
        gen_helper_compute_fprf(t0, cpu_env, arg, t0);
        tcg_gen_mov_i32(cpu_crf[1], t0);
    }

    tcg_temp_free_i32(t0);
}
261
262 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
263 {
264 if (ctx->access_type != access_type) {
265 tcg_gen_movi_i32(cpu_access_type, access_type);
266 ctx->access_type = access_type;
267 }
268 }
269
270 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
271 {
272 if (NARROW_MODE(ctx)) {
273 nip = (uint32_t)nip;
274 }
275 tcg_gen_movi_tl(cpu_nip, nip);
276 }
277
/* Raise exception excp with error code error.  nip is synced first, but
   only when no exception is already pending for this instruction. */
static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    /* Mark the exception so translation of this TB stops. */
    ctx->exception = (excp);
}
291
/* Raise exception excp (no error code); see gen_exception_err. */
static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}
303
/* Raise a gdbstub EXCP_DEBUG exception.  nip is synced unless a branch
   or sync exception is in progress (in which case it was set already by
   the branch/sync code — NOTE(review): inferred from the guard). */
static inline void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->nip);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}
316
/* Raise a program exception for an invalid/illegal instruction form. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
}
321
322 /* Stop translation */
/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->nip);
    ctx->exception = POWERPC_EXCP_STOP;
}
328
329 /* No need to update nip here, as execution flow will change */
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
334
/* Convenience wrappers over GEN_OPCODE/GEN_OPCODE2: the _E variants take
   an extended type (type2), the *2 variants a separate display name. */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
346
/* One entry of the static opcode table: the three opcode parts plus the
   handler record; padded so the struct size is pointer-aligned. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[5];
#else
    unsigned char pad[1];
#endif
    opc_handler_t handler;
    const char *oname;          /* instruction mnemonic */
} opcode_t;
357
358 /*****************************************************************************/
359 /*** Instruction decoding ***/
/* Generate an accessor returning the nb-bit field at bit offset shift. */
#define EXTRACT_HELPER(name, shift, nb)                                       \
static inline uint32_t name(uint32_t opcode)                                  \
{                                                                             \
    return (opcode >> (shift)) & ((1 << (nb)) - 1);                           \
}

/* Signed variant: the int16_t cast sign-extends, so this is only correct
   for nb == 16 fields (SIMM, d). */
#define EXTRACT_SHELPER(name, shift, nb)                                      \
static inline int32_t name(uint32_t opcode)                                   \
{                                                                             \
    return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1));                \
}

/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Destination */
EXTRACT_HELPER(rD, 21, 5);
/* Source */
EXTRACT_HELPER(rS, 21, 5);
/* First operand */
EXTRACT_HELPER(rA, 16, 5);
/* Second operand */
EXTRACT_HELPER(rB, 11, 5);
/* Third operand */
EXTRACT_HELPER(rC, 6, 5);
/*** Get CRn ***/
EXTRACT_HELPER(crfD, 23, 3);
EXTRACT_HELPER(crfS, 18, 3);
EXTRACT_HELPER(crbD, 21, 5);
EXTRACT_HELPER(crbA, 16, 5);
EXTRACT_HELPER(crbB, 11, 5);
/* SPR / TBL */
EXTRACT_HELPER(_SPR, 11, 10);
/* The SPR number is encoded in the instruction with its two 5-bit halves
   swapped; swap them back to recover the architected SPR number. */
static inline uint32_t SPR(uint32_t opcode)
{
    uint32_t sprn = _SPR(opcode);
    uint32_t hi5 = (sprn >> 5) & 0x1F;
    uint32_t lo5 = sprn & 0x1F;

    return hi5 | (lo5 << 5);
}
404 /*** Get constants ***/
EXTRACT_HELPER(IMM, 12, 8);
/* 16 bits signed immediate value */
EXTRACT_SHELPER(SIMM, 0, 16);
/* 16 bits unsigned immediate value */
EXTRACT_HELPER(UIMM, 0, 16);
/* 5 bits signed immediate value */
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
EXTRACT_HELPER(MB, 6, 5);
/* Mask end */
EXTRACT_HELPER(ME, 1, 5);
/* Trap operand */
EXTRACT_HELPER(TO, 21, 5);

EXTRACT_HELPER(CRM, 12, 8);
EXTRACT_HELPER(SR, 16, 4);

/* mtfsf/mtfsfi */
EXTRACT_HELPER(FPBF, 19, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
EXTRACT_HELPER(FPL, 21, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);

/*** Jump target decoding ***/
/* Displacement */
EXTRACT_SHELPER(d, 0, 16);
440 /* Immediate address */
441 static inline target_ulong LI(uint32_t opcode)
442 {
443 return (opcode >> 0) & 0x03FFFFFC;
444 }
445
/* B-form branch displacement: the 14-bit BD field with its implied low
   two zero bits. */
static inline uint32_t BD(uint32_t opcode)
{
    return opcode & 0xFFFC;
}
450
/* Branch condition operands. */
EXTRACT_HELPER(BO, 21, 5);
EXTRACT_HELPER(BI, 16, 5);
/* Absolute/relative address */
EXTRACT_HELPER(AA, 1, 1);
/* Link */
EXTRACT_HELPER(LK, 0, 1);
457
458 /* Create a mask between <start> and <end> bits */
/* Create a mask between <start> and <end> bits */
/* Bits are numbered in PowerPC convention (bit 0 = MSB).  The fast
   paths handle start == 0 or end == MSB-of-type; the preprocessor
   #if/#else only selects the width, the trailing `else` belongs to the
   remaining if chain.  start > end produces the complemented (wrapped)
   mask, as used by rlwinm-family instructions. */
static inline target_ulong MASK(uint32_t start, uint32_t end)
{
    target_ulong ret;

#if defined(TARGET_PPC64)
    if (likely(start == 0)) {
        ret = UINT64_MAX << (63 - end);
    } else if (likely(end == 63)) {
        ret = UINT64_MAX >> start;
    }
#else
    if (likely(start == 0)) {
        ret = UINT32_MAX << (31 - end);
    } else if (likely(end == 31)) {
        ret = UINT32_MAX >> start;
    }
#endif
    else {
        ret = (((target_ulong)(-1ULL)) >> (start)) ^
            (((target_ulong)(-1ULL) >> (end)) >> 1);
        if (unlikely(start > end))
            return ~ret;
    }

    return ret;
}
485
486 /*****************************************************************************/
487 /* PowerPC instructions table */
488
489 #if defined(DO_PPC_STATISTICS)
490 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
491 { \
492 .opc1 = op1, \
493 .opc2 = op2, \
494 .opc3 = op3, \
495 .pad = { 0, }, \
496 .handler = { \
497 .inval1 = invl, \
498 .type = _typ, \
499 .type2 = _typ2, \
500 .handler = &gen_##name, \
501 .oname = stringify(name), \
502 }, \
503 .oname = stringify(name), \
504 }
505 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
506 { \
507 .opc1 = op1, \
508 .opc2 = op2, \
509 .opc3 = op3, \
510 .pad = { 0, }, \
511 .handler = { \
512 .inval1 = invl1, \
513 .inval2 = invl2, \
514 .type = _typ, \
515 .type2 = _typ2, \
516 .handler = &gen_##name, \
517 .oname = stringify(name), \
518 }, \
519 .oname = stringify(name), \
520 }
521 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
522 { \
523 .opc1 = op1, \
524 .opc2 = op2, \
525 .opc3 = op3, \
526 .pad = { 0, }, \
527 .handler = { \
528 .inval1 = invl, \
529 .type = _typ, \
530 .type2 = _typ2, \
531 .handler = &gen_##name, \
532 .oname = onam, \
533 }, \
534 .oname = onam, \
535 }
536 #else
537 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
538 { \
539 .opc1 = op1, \
540 .opc2 = op2, \
541 .opc3 = op3, \
542 .pad = { 0, }, \
543 .handler = { \
544 .inval1 = invl, \
545 .type = _typ, \
546 .type2 = _typ2, \
547 .handler = &gen_##name, \
548 }, \
549 .oname = stringify(name), \
550 }
551 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
552 { \
553 .opc1 = op1, \
554 .opc2 = op2, \
555 .opc3 = op3, \
556 .pad = { 0, }, \
557 .handler = { \
558 .inval1 = invl1, \
559 .inval2 = invl2, \
560 .type = _typ, \
561 .type2 = _typ2, \
562 .handler = &gen_##name, \
563 }, \
564 .oname = stringify(name), \
565 }
566 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
567 { \
568 .opc1 = op1, \
569 .opc2 = op2, \
570 .opc3 = op3, \
571 .pad = { 0, }, \
572 .handler = { \
573 .inval1 = invl, \
574 .type = _typ, \
575 .type2 = _typ2, \
576 .handler = &gen_##name, \
577 }, \
578 .oname = onam, \
579 }
580 #endif
581
582 /* SPR load/store helpers */
/* Load SPR number reg from env into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
587
/* Store TCG value t into SPR number reg in env. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
592
593 /* Invalid instruction */
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
598
/* Fallback table entry: every bit invalid, raises the program exception. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
606
607 /*** Integer comparison ***/
608
/* Compare arg0 with arg1 (signed if s != 0) and set CR field crf to
   LT/GT/EQ plus the current SO bit. */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1 = tcg_temp_new_i32();

    /* Start the CR field from SO (bit CRF_SO == 0 after truncation). */
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);

    tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_LT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_GT);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
    tcg_gen_trunc_tl_i32(t1, t0);
    tcg_gen_shli_i32(t1, t1, CRF_EQ);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);

    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
634
/* Compare arg0 against the immediate arg1; see gen_op_cmp. */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
641
/* 32-bit compare: extend both operands (sign or zero, per s) before the
   full-width comparison. */
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}
658
/* 32-bit compare against an immediate; see gen_op_cmp32. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
665
/* Record form (Rc=1): set CR0 from a signed compare of reg with zero,
   using the 32-bit compare in narrow mode. */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
674
675 /* cmp */
/* cmp: signed compare; the L bit (0x00200000) selects 64-bit compare
   when the CPU supports 64-bit (PPC_64B). */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}
686
687 /* cmpi */
/* cmpi: signed compare with 16-bit sign-extended immediate; L bit as in cmp. */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}
698
699 /* cmpl */
/* cmpl: unsigned (logical) compare; L bit as in cmp. */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}
710
711 /* cmpli */
/* cmpli: unsigned compare with 16-bit zero-extended immediate. */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
722
723 /* isel (PowerPC 2.03 specification) */
724 static void gen_isel(DisasContext *ctx)
725 {
726 int l1, l2;
727 uint32_t bi = rC(ctx->opcode);
728 uint32_t mask;
729 TCGv_i32 t0;
730
731 l1 = gen_new_label();
732 l2 = gen_new_label();
733
734 mask = 1 << (3 - (bi & 0x03));
735 t0 = tcg_temp_new_i32();
736 tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
737 tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
738 if (rA(ctx->opcode) == 0)
739 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
740 else
741 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
742 tcg_gen_br(l2);
743 gen_set_label(l1);
744 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
745 gen_set_label(l2);
746 tcg_temp_free_i32(t0);
747 }
748
749 /* cmpb: PowerPC 2.05 specification */
/* cmpb: PowerPC 2.05 specification */
/* Byte-wise compare of rS and rB into rA, implemented in a helper. */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
755
756 /*** Integer arithmetic ***/
757
/* Compute XER.OV (and accumulate into SO) for arg0 = arg1 op arg2.
   Signed overflow occurred iff the operands' signs permitted it and the
   result's sign differs; the xor/and(c) pattern captures that in the
   top bit, which is then shifted down into OV. */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    /* In narrow mode the overflow bit is bit 31, not the top bit. */
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
    }
    tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
777
778 /* Common add function */
/* Common add function */
/* ret = arg1 + arg2 [+ CA], optionally computing CA, OV, and CR0.
   A scratch destination is used when flags are computed so that ret may
   alias an input without corrupting the flag computation. */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
        } else {
            /* Full-width carry via double-word add2. */
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    /* Copy back and release the scratch destination, if one was used. */
    if (!TCGV_EQUAL(t0, ret)) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
833 /* Add functions with two operands */
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}

/* The Rc bit in the opcode selects the record (".") forms at runtime. */
/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
868 /* addi */
/* addi: rD = (rA|0) + SIMM; rA == 0 means the literal 0 (li form). */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}
881 /* addic addic.*/
/* Shared body of addic/addic.: add immediate, always computing CA. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}
889
/* addic */
static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}
894
/* addic. (record form) */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
899
900 /* addis */
/* addis: rD = (rA|0) + (SIMM << 16); rA == 0 is the lis form.
   NOTE(review): simm << 16 left-shifts a negative target_long for
   negative immediates — relies on the compiler's arithmetic behavior;
   confirm build flags tolerate this. */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}
913
/* 32-bit divide: ret = arg1 / arg2 (signed if sign != 0).
   Divide-by-zero and INT32_MIN / -1 branch to l1, where the result is
   forced (sign-replicated dividend for signed, 0 for unsigned) and, if
   requested, OV/SO are set. */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();
    TCGv_i32 t0 = tcg_temp_local_new_i32();
    TCGv_i32 t1 = tcg_temp_local_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        /* Trap the INT32_MIN / -1 overflow case before dividing. */
        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i32(t0, t0, t1);
    } else {
        tcg_gen_divu_i32(t0, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Error path: result is undefined by the ISA; mirror hardware by
       using the sign-extended dividend sign (signed) or zero. */
    if (sign) {
        tcg_gen_sari_i32(t0, t0, 31);
    } else {
        tcg_gen_movi_i32(t0, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    tcg_gen_extu_i32_tl(ret, t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
955 /* Div functions */
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
969 #if defined(TARGET_PPC64)
/* 64-bit divide; same structure and error cases as gen_op_arith_divw
   (divide-by-zero and INT64_MIN / -1 go to the forced-result path). */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    int l1 = gen_new_label();
    int l2 = gen_new_label();

    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
    if (sign) {
        int l3 = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
        gen_set_label(l3);
        tcg_gen_div_i64(ret, arg1, arg2);
    } else {
        tcg_gen_divu_i64(ret, arg1, arg2);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 0);
    }
    tcg_gen_br(l2);
    gen_set_label(l1);
    if (sign) {
        tcg_gen_sari_i64(ret, arg1, 63);
    } else {
        tcg_gen_movi_i64(ret, 0);
    }
    if (compute_ov) {
        tcg_gen_movi_tl(cpu_ov, 1);
        tcg_gen_movi_tl(cpu_so, 1);
    }
    gen_set_label(l2);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
1017 #endif
1018
1019 /* mulhw mulhw. */
/* mulhw mulhw. */
/* rD = high 32 bits of the signed 32x32 product of rA and rB. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);   /* t0 = low half, t1 = high half */
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1034
1035 /* mulhwu mulhwu. */
/* mulhwu mulhwu. */
/* rD = high 32 bits of the unsigned 32x32 product of rA and rB. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);   /* t0 = low half, t1 = high half */
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1050
1051 /* mullw mullw. */
/* mullw mullw. */
/* rD = low 32 bits of rA * rB, sign-extended to the register width. */
static void gen_mullw(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1060
1061 /* mullwo mullwo. */
/* mullwo mullwo. */
/* mullw with OV: overflow iff the high half is not the sign extension
   of the low half. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV = (high half != sign bits of low half); fold into SO. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1082
1083 /* mulli */
/* mulli */
/* rD = rA * SIMM (low-order result, no flags). */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
1089
1090 #if defined(TARGET_PPC64)
1091 /* mulhd mulhd. */
/* mulhd mulhd. */
/* rD = high 64 bits of the signed 64x64 product; low half discarded. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1102
1103 /* mulhdu mulhdu. */
/* mulhdu mulhdu. */
/* rD = high 64 bits of the unsigned 64x64 product; low half discarded. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1114
1115 /* mulld mulld. */
/* mulld mulld. */
/* rD = low 64 bits of rA * rB. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
1123
/* mulldo mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    /* 64-bit multiply with OV computation: done in a helper because it
       needs the full 128-bit product to detect overflow.  */
    gen_helper_mulldo(cpu_gpr[rD(ctx->opcode)], cpu_env,
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif
1134
1135 /* Common subf function */
1136 static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
1137 TCGv arg2, bool add_ca, bool compute_ca,
1138 bool compute_ov, bool compute_rc0)
1139 {
1140 TCGv t0 = ret;
1141
1142 if (compute_ca || compute_ov) {
1143 t0 = tcg_temp_new();
1144 }
1145
1146 if (compute_ca) {
1147 /* dest = ~arg1 + arg2 [+ ca]. */
1148 if (NARROW_MODE(ctx)) {
1149 /* Caution: a non-obvious corner case of the spec is that we
1150 must produce the *entire* 64-bit addition, but produce the
1151 carry into bit 32. */
1152 TCGv inv1 = tcg_temp_new();
1153 TCGv t1 = tcg_temp_new();
1154 tcg_gen_not_tl(inv1, arg1);
1155 if (add_ca) {
1156 tcg_gen_add_tl(t0, arg2, cpu_ca);
1157 } else {
1158 tcg_gen_addi_tl(t0, arg2, 1);
1159 }
1160 tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */
1161 tcg_gen_add_tl(t0, t0, inv1);
1162 tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */
1163 tcg_temp_free(t1);
1164 tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */
1165 tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
1166 } else if (add_ca) {
1167 TCGv zero, inv1 = tcg_temp_new();
1168 tcg_gen_not_tl(inv1, arg1);
1169 zero = tcg_const_tl(0);
1170 tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
1171 tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
1172 tcg_temp_free(zero);
1173 tcg_temp_free(inv1);
1174 } else {
1175 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
1176 tcg_gen_sub_tl(t0, arg2, arg1);
1177 }
1178 } else if (add_ca) {
1179 /* Since we're ignoring carry-out, we can simplify the
1180 standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
1181 tcg_gen_sub_tl(t0, arg2, arg1);
1182 tcg_gen_add_tl(t0, t0, cpu_ca);
1183 tcg_gen_subi_tl(t0, t0, 1);
1184 } else {
1185 tcg_gen_sub_tl(t0, arg2, arg1);
1186 }
1187
1188 if (compute_ov) {
1189 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
1190 }
1191 if (unlikely(compute_rc0)) {
1192 gen_set_Rc0(ctx, t0);
1193 }
1194
1195 if (!TCGV_EQUAL(t0, ret)) {
1196 tcg_gen_mov_tl(ret, t0);
1197 tcg_temp_free(t0);
1198 }
1199 }
/* Subtract variants with two register operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Subtract variants with one register operand and one constant */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
1234
/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    /* rD = SIMM - rA, always computing CA, never OV/CR0.  */
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}
1243
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    /* rD = 0 - rA via the common subf path (i.e. ~rA + 1).  */
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}
1252
1253 static void gen_neg(DisasContext *ctx)
1254 {
1255 gen_op_arith_neg(ctx, 0);
1256 }
1257
1258 static void gen_nego(DisasContext *ctx)
1259 {
1260 gen_op_arith_neg(ctx, 1);
1261 }
1262
/*** Integer logical ***/
/* Two-source logical op: rA = rS <op> rB, optional CR0 update.  */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
       cpu_gpr[rB(ctx->opcode)]);                                             \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* One-source logical op: rA = <op>(rS), optional CR0 update.  */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}
1280
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
1285
1286 /* andi. */
1287 static void gen_andi_(DisasContext *ctx)
1288 {
1289 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
1290 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1291 }
1292
1293 /* andis. */
1294 static void gen_andis_(DisasContext *ctx)
1295 {
1296 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
1297 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1298 }
1299
/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    /* Count leading zeros of the low 32 bits of rS into rA.  */
    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
1317
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr / mr. (move register) case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* or. rx,rx,rx: just set CR0 from the unchanged register.  */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else {
        /* "or rx,rx,rx" without Rc is a process-priority hint: the
           register number selects the PPR[PRI] value to store.  */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (ctx->mem_idx > 0) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (ctx->mem_idx > 0) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (ctx->mem_idx > 0) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->mem_idx > 1) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            /* nop */
            break;
        }
        if (prio) {
            /* Write the priority into SPR_PPR bits 11:13 (mask
               0x001C000000000000, i.e. shifted left by 50).  */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
1396
1397 /* xor & xor. */
1398 static void gen_xor(DisasContext *ctx)
1399 {
1400 /* Optimisation for "set to zero" case */
1401 if (rS(ctx->opcode) != rB(ctx->opcode))
1402 tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1403 else
1404 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
1405 if (unlikely(Rc(ctx->opcode) != 0))
1406 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1407 }
1408
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    /* ori r0,r0,0 (and friends) is the architected no-op.  */
    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        /* XXX: should handle special NOPs for POWER series */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}
1421
/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    /* OR with the immediate shifted into the upper halfword.  */
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}
1433
/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}
1445
/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    /* XOR with the immediate shifted into the upper halfword.  */
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
}
1457
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    /* Per-byte population count of rS into rA.  */
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
1463
static void gen_popcntw(DisasContext *ctx)
{
    /* Per-word population count of rS into rA (PowerPC 2.06).  */
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
1468
#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    /* Whole-doubleword population count of rS into rA.  */
    gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
1476
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    /* Parity of the least-significant byte of each word: fold the byte
       parities of rS by successive xor-shifts, then keep bit 0 of each
       32-bit word.  */
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Mask covers bit 0 of both words on 64-bit targets.  */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}
1490
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    /* Parity of the byte parities of the whole doubleword: xor-fold down
       to a single bit.  */
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif
1508
#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
1512
/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    /* Count leading zeros of the full 64-bit rS into rA.  */
    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
1521
/*** Integer rotate ***/

/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    /* Rotate-left-word-immediate-then-mask-insert: rotate the low word
       of rS by sh, then merge the bits selected by MASK(mb,me) into rA.  */
    uint32_t mb, me, sh;

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    sh = SH(ctx->opcode);
    if (likely(sh == 0 && mb == 0 && me == 31)) {
        /* Full-word insert with no rotation: just zero-extend rS.  */
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        target_ulong mask;
        TCGv t1;
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        /* 64-bit target: rotate the low word in 32-bit arithmetic.  */
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t2, t2, sh);
        tcg_gen_extu_i32_i64(t0, t2);
        tcg_temp_free_i32(t2);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        /* Word mask positions shift up by 32 in the 64-bit MASK().  */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        t1 = tcg_temp_new();
        /* rA = (rotated & mask) | (rA & ~mask).  */
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1562
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    /* Rotate-left-word-immediate-then-AND-with-mask, with fast paths for
       the common shift-left (slwi) and shift-right (srwi) encodings.  */
    uint32_t mb, me, sh;

    sh = SH(ctx->opcode);
    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);

    if (likely(mb == 0 && me == (31 - sh))) {
        /* slwi: mask keeps exactly the bits shifted in from the right.  */
        if (likely(sh == 0)) {
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
        } else {
            TCGv t0 = tcg_temp_new();
            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shli_tl(t0, t0, sh);
            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
            tcg_temp_free(t0);
        }
    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
        /* srwi: rotate + mask is equivalent to a logical right shift.  */
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_shri_tl(t0, t0, mb);
        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
        tcg_temp_free(t0);
    } else {
        /* General case: 32-bit rotate then AND with MASK(mb,me).  */
        TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
        tcg_gen_rotli_i32(t1, t1, sh);
        tcg_gen_extu_i32_i64(t0, t1);
        tcg_temp_free_i32(t1);
#else
        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1609
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    /* Rotate-left-word-by-register-then-AND-with-mask; the rotate count
       is the low 5 bits of rB.  */
    uint32_t mb, me;
    TCGv t0;
#if defined(TARGET_PPC64)
    TCGv_i32 t1, t2;
#endif

    mb = MB(ctx->opcode);
    me = ME(ctx->opcode);
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
    /* Perform the rotate on the low 32 bits only.  */
    t1 = tcg_temp_new_i32();
    t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_trunc_i64_i32(t2, t0);
    tcg_gen_rotl_i32(t1, t1, t2);
    tcg_gen_extu_i32_i64(t0, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
#else
    tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
    if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        /* Full-word mask: the rotated value is the result.  */
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1648
#if defined(TARGET_PPC64)
/* Expand the 2 (resp. 4) sub-opcode variants of a 64-bit rotate insn,
   passing the extra opcode bit(s) to the common generator.  */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
1680
/* Common 64-bit rotate-immediate-and-mask: rA = rotl64(rS, sh) & MASK(mb,me),
   with fast paths for pure left (sldi) and right (srdi) shifts.  */
static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
                              uint32_t sh)
{
    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
        /* sldi */
        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
        /* srdi */
        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        if (likely(mb == 0 && me == 63)) {
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
        } else {
            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
        }
        tcg_temp_free(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    /* Clear left: mask runs from mb to bit 63.  The extra opcode bits
       (shn/mbn) supply bit 5 of the 6-bit sh/mb fields.  */
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    /* Clear right: mask runs from bit 0 to me.  MB() is used on purpose:
       in MD-form the ME field occupies the same instruction bits as MB.  */
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    /* Clear both sides: mask runs from mb to 63-sh.  */
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
1731
/* Common 64-bit rotate-by-register-and-mask: the rotate count is the
   low 6 bits of rB.  */
static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
{
    TCGv t0;

    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    if (unlikely(mb != 0 || me != 63)) {
        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
    } else {
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
    }
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1748
/* rldcl - rldcl. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    /* Rotate by register, clear left: mask from mb to 63.  */
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    /* Rotate by register, clear right: mask from 0 to me.  MB() is used
       on purpose: MDS-form ME occupies the same bits as MB.  */
    uint32_t me;

    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
/* rldimi - rldimi. */
static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    /* Rotate-immediate-then-mask-insert: merge rotl64(rS, sh) into rA
       under MASK(mb, 63-sh).  */
    uint32_t sh, mb, me;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    me = 63 - sh;
    if (unlikely(sh == 0 && mb == 0)) {
        /* Full-width insert with no rotation: plain register move.  */
        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    } else {
        TCGv t0, t1;
        target_ulong mask;

        t0 = tcg_temp_new();
        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
        t1 = tcg_temp_new();
        mask = MASK(mb, me);
        /* rA = (rotated & mask) | (rA & ~mask).  */
        tcg_gen_andi_tl(t0, t0, mask);
        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
1796
/*** Integer shift ***/

/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    /* Shift-left-word: a shift count >= 32 (rB bit 26 set) must yield 0,
       which the mask trick below implements without a branch.  */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Replicate rB bit 5 (counting from LSB) across the register.  */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    /* Result is a 32-bit quantity: clear the upper half.  */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1823
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
    /* Arithmetic right shift word: helper computes both result and CA.  */
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1832
/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    /* Arithmetic right shift word by immediate.  CA is set iff the source
       is negative and any 1-bit is shifted out.  */
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: copy and clear CA.  */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* CA candidate: the bits about to be shifted out.  */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* Keep them only when the (sign-extended) value is negative.  */
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
1857
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    /* Shift-right-word: counts >= 32 give 0 via the same branch-free
       masking used by gen_slw.  */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical shift: operate on the zero-extended low word.  */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1882
#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    /* Shift-left-doubleword: counts >= 64 (rB bit 6) give 0, using the
       replicate-sign-bit mask trick.  */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1902
/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
    /* Arithmetic right shift doubleword: helper computes result and CA.  */
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    /* Arithmetic right shift doubleword by immediate; 'n' supplies bit 5
       of the 6-bit shift count.  CA is set iff the source is negative and
       a 1-bit is shifted out.  */
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
    } else {
        TCGv t0;
        /* Bits that will be shifted out...  */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...count only when the value is negative.  */
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
1934
static void gen_sradi0(DisasContext *ctx)
{
    /* sradi with sh bit 5 clear (shift < 32).  */
    gen_sradi(ctx, 0);
}
1939
static void gen_sradi1(DisasContext *ctx)
{
    /* sradi with sh bit 5 set (shift >= 32).  */
    gen_sradi(ctx, 1);
}
1944
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    /* Shift-right-doubleword: counts >= 64 give 0 via branch-free mask.  */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#endif
1964
/*** Floating-Point arithmetic ***/
/* Three-operand FP op (A, C, B), e.g. fmadd; 'isfloat' rounds the result
   to single precision.  */
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);     \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf,                      \
                     Rc(ctx->opcode) != 0);                                   \
}
1986
/* Expand both the double (0x3F) and single (0x3B) variants of an ACB op.  */
#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                              \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);                     \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
1990
/* Two-operand FP op (A, B), e.g. fadd/fsub/fdiv.  */
#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rB(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
/* Expand both the double and single variants of an AB op.  */
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2014
/* Two-operand FP op (A, C), used by fmul which encodes its second source
   in the C field.  */
#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type)     \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env,                       \
                     cpu_fpr[rA(ctx->opcode)],                                \
                     cpu_fpr[rC(ctx->opcode)]);                               \
    if (isfloat) {                                                            \
        gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,                    \
                        cpu_fpr[rD(ctx->opcode)]);                            \
    }                                                                         \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
/* Expand both the double and single variants of an AC op.  */
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                        \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);               \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2038
/* One-operand FP op on B (round/convert family).  */
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                           \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
2054
/* One-operand FP op on B (estimate family: fre/fres/frsqrte).  */
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                          \
static void gen_f##name(DisasContext *ctx)                                    \
{                                                                             \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    /* NIP cannot be restored if the memory exception comes from a helper */  \
    gen_update_nip(ctx, ctx->nip - 4);                                        \
    gen_reset_fpstatus();                                                     \
    gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env,                     \
                       cpu_fpr[rB(ctx->opcode)]);                             \
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)],                                \
                     set_fprf, Rc(ctx->opcode) != 0);                         \
}
2070
/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2086
/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    /* Single-precision reciprocal sqrt estimate: reuse the double
       frsqrte helper, then round the result to single with frsp.  */
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
                       cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
2103
/* fsel */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional: */

/* fsqrt */
static void gen_fsqrt(DisasContext *ctx)
{
    /* Double-precision square root via helper.  */
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
2124
static void gen_fsqrts(DisasContext *ctx)
{
    /* Single-precision sqrt via double sqrt + frsp.
       NOTE(review): rounding twice (to double, then to single) can in rare
       cases differ from a direct single-precision sqrt — confirm whether a
       dedicated fsqrts helper is warranted.  */
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
                     cpu_fpr[rB(ctx->opcode)]);
    gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
                    cpu_fpr[rD(ctx->opcode)]);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
2140
/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
#if defined(TARGET_PPC64)
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
#endif

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2175
2176 /*** Floating-Point compare ***/
2177
/* fcmpo: ordered floating compare of FPRA with FPRB into CR field crfD.
 * "Ordered" means the helper may raise VXVC on quiet NaN operands. */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    /* Raise any enabled FP exception the compare may have latched. */
    gen_helper_float_check_status(cpu_env);
}
2195
/* fcmpu: unordered floating compare of FPRA with FPRB into CR field crfD. */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
                     cpu_fpr[rB(ctx->opcode)], crf);
    tcg_temp_free_i32(crf);
    /* Raise any enabled FP exception the compare may have latched. */
    gen_helper_float_check_status(cpu_env);
}
2213
2214 /*** Floating-point move ***/
2215 /* fabs */
2216 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
2217 static void gen_fabs(DisasContext *ctx)
2218 {
2219 if (unlikely(!ctx->fpu_enabled)) {
2220 gen_exception(ctx, POWERPC_EXCP_FPU);
2221 return;
2222 }
2223 tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2224 ~(1ULL << 63));
2225 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2226 }
2227
2228 /* fmr - fmr. */
2229 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
2230 static void gen_fmr(DisasContext *ctx)
2231 {
2232 if (unlikely(!ctx->fpu_enabled)) {
2233 gen_exception(ctx, POWERPC_EXCP_FPU);
2234 return;
2235 }
2236 tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
2237 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2238 }
2239
2240 /* fnabs */
2241 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
2242 static void gen_fnabs(DisasContext *ctx)
2243 {
2244 if (unlikely(!ctx->fpu_enabled)) {
2245 gen_exception(ctx, POWERPC_EXCP_FPU);
2246 return;
2247 }
2248 tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2249 1ULL << 63);
2250 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2251 }
2252
2253 /* fneg */
2254 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
2255 static void gen_fneg(DisasContext *ctx)
2256 {
2257 if (unlikely(!ctx->fpu_enabled)) {
2258 gen_exception(ctx, POWERPC_EXCP_FPU);
2259 return;
2260 }
2261 tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
2262 1ULL << 63);
2263 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2264 }
2265
2266 /* fcpsgn: PowerPC 2.05 specification */
2267 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
2268 static void gen_fcpsgn(DisasContext *ctx)
2269 {
2270 if (unlikely(!ctx->fpu_enabled)) {
2271 gen_exception(ctx, POWERPC_EXCP_FPU);
2272 return;
2273 }
2274 tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
2275 cpu_fpr[rB(ctx->opcode)], 0, 63);
2276 gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
2277 }
2278
2279 /*** Floating-Point status & ctrl register ***/
2280
/* mcrfs: copy FPSCR field crfS into CR field crfD, then clear the
 * copied exception bits in the FPSCR. */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    int bfa;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Bit offset of the selected 4-bit FPSCR field. */
    bfa = 4 * (7 - crfS(ctx->opcode));
    tcg_gen_shri_tl(tmp, cpu_fpscr, bfa);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_temp_free(tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
    /* NOTE(review): clearing the field here does not recompute the FEX/VX
     * summary bits of the FPSCR — confirm against the ISA; later QEMU
     * versions delegate this to a helper. */
    tcg_gen_andi_tl(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
}
2298
/* mffs: move the whole FPSCR into FPRD (zero-extended to 64 bits). */
static void gen_mffs(DisasContext *ctx)
{
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
    gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
}
2310
/* mtfsb0: clear one FPSCR bit. */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Instruction numbers bits big-endian; convert to a shift amount. */
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* FEX and VX are summary bits and cannot be cleared directly. */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from an helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: copy FPSCR[FX,FEX,VX,OX] into CR1. */
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}
2335
/* mtfsb1: set one FPSCR bit (may immediately raise an FP exception). */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    /* Instruction numbers bits big-endian; convert to a shift amount. */
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        /* NIP cannot be restored if the memory exception comes from an helper */
        gen_update_nip(ctx, ctx->nip - 4);
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: copy FPSCR[FX,FEX,VX,OX] into CR1. */
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_float_check_status(cpu_env);
}
2363
2364 /* mtfsf */
2365 static void gen_mtfsf(DisasContext *ctx)
2366 {
2367 TCGv_i32 t0;
2368 int flm, l, w;
2369
2370 if (unlikely(!ctx->fpu_enabled)) {
2371 gen_exception(ctx, POWERPC_EXCP_FPU);
2372 return;
2373 }
2374 flm = FPFLM(ctx->opcode);
2375 l = FPL(ctx->opcode);
2376 w = FPW(ctx->opcode);
2377 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
2378 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2379 return;
2380 }
2381 /* NIP cannot be restored if the memory exception comes from an helper */
2382 gen_update_nip(ctx, ctx->nip - 4);
2383 gen_reset_fpstatus();
2384 if (l) {
2385 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
2386 } else {
2387 t0 = tcg_const_i32(flm << (w * 8));
2388 }
2389 gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
2390 tcg_temp_free_i32(t0);
2391 if (unlikely(Rc(ctx->opcode) != 0)) {
2392 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2393 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2394 }
2395 /* We can raise a differed exception */
2396 gen_helper_float_check_status(cpu_env);
2397 }
2398
2399 /* mtfsfi */
2400 static void gen_mtfsfi(DisasContext *ctx)
2401 {
2402 int bf, sh, w;
2403 TCGv_i64 t0;
2404 TCGv_i32 t1;
2405
2406 if (unlikely(!ctx->fpu_enabled)) {
2407 gen_exception(ctx, POWERPC_EXCP_FPU);
2408 return;
2409 }
2410 w = FPW(ctx->opcode);
2411 bf = FPBF(ctx->opcode);
2412 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
2413 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2414 return;
2415 }
2416 sh = (8 * w) + 7 - bf;
2417 /* NIP cannot be restored if the memory exception comes from an helper */
2418 gen_update_nip(ctx, ctx->nip - 4);
2419 gen_reset_fpstatus();
2420 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
2421 t1 = tcg_const_i32(1 << sh);
2422 gen_helper_store_fpscr(cpu_env, t0, t1);
2423 tcg_temp_free_i64(t0);
2424 tcg_temp_free_i32(t1);
2425 if (unlikely(Rc(ctx->opcode) != 0)) {
2426 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
2427 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
2428 }
2429 /* We can raise a differed exception */
2430 gen_helper_float_check_status(cpu_env);
2431 }
2432
2433 /*** Addressing modes ***/
2434 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
/* EA = (rA|0) + SIMM.  maskl clears low displacement bits (DS/DQ forms).
 * In narrow (32-bit) mode the result is truncated to 32 bits. */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        /* simm == 0: plain copy of rA, truncated in narrow mode. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
2459
2460 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2461 {
2462 if (rA(ctx->opcode) == 0) {
2463 if (NARROW_MODE(ctx)) {
2464 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2465 } else {
2466 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2467 }
2468 } else {
2469 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2470 if (NARROW_MODE(ctx)) {
2471 tcg_gen_ext32u_tl(EA, EA);
2472 }
2473 }
2474 }
2475
2476 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2477 {
2478 if (rA(ctx->opcode) == 0) {
2479 tcg_gen_movi_tl(EA, 0);
2480 } else if (NARROW_MODE(ctx)) {
2481 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2482 } else {
2483 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2484 }
2485 }
2486
2487 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2488 target_long val)
2489 {
2490 tcg_gen_addi_tl(ret, arg1, val);
2491 if (NARROW_MODE(ctx)) {
2492 tcg_gen_ext32u_tl(ret, ret);
2493 }
2494 }
2495
/* Emit an alignment check: raise POWERPC_EXCP_ALIGN if (EA & mask) != 0. */
static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
{
    int l1 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1, t2;
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    tcg_gen_andi_tl(t0, EA, mask);
    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
    t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
    t2 = tcg_const_i32(0);
    gen_helper_raise_exception_err(cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    gen_set_label(l1);
    tcg_temp_free(t0);
}
2513
/*** Integer load ***/
/* Load a zero-extended byte from *arg2 into arg1 (no byte order issue). */
static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
}
2519
/* Load a sign-extended byte from *arg2 into arg1. */
static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
}
2524
/* Load a zero-extended halfword, byte-swapped in little-endian mode. */
static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
    if (unlikely(ctx->le_mode)) {
        tcg_gen_bswap16_tl(arg1, arg1);
    }
}
2532
/* Load a sign-extended halfword.  In LE mode we must load unsigned,
 * swap, then sign-extend, since the swap would clobber the sign bits. */
static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
        tcg_gen_bswap16_tl(arg1, arg1);
        tcg_gen_ext16s_tl(arg1, arg1);
    } else {
        tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
    }
}
2543
/* Load a zero-extended word, byte-swapped in little-endian mode. */
static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
    if (unlikely(ctx->le_mode)) {
        tcg_gen_bswap32_tl(arg1, arg1);
    }
}
2551
2552 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
2553 {
2554 if (unlikely(ctx->le_mode)) {
2555 tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
2556 tcg_gen_bswap32_tl(arg1, arg1);
2557 tcg_gen_ext32s_tl(arg1, arg1);
2558 } else
2559 tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
2560 }
2561
/* Load a doubleword, byte-swapped in little-endian mode. */
static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
    if (unlikely(ctx->le_mode)) {
        tcg_gen_bswap64_i64(arg1, arg1);
    }
}
2569
/* Store a byte to *arg2 (no byte order issue). */
static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}
2574
/* Store a halfword, byte-swapped in little-endian mode (via a temp so
 * the caller's register is not clobbered). */
static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext16u_tl(t0, arg1);
        tcg_gen_bswap16_tl(t0, t0);
        tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
    }
}
2587
/* Store a word, byte-swapped in little-endian mode (via a temp so the
 * caller's register is not clobbered). */
static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (unlikely(ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, arg1);
        tcg_gen_bswap32_tl(t0, t0);
        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
    }
}
2600
2601 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
2602 {
2603 if (unlikely(ctx->le_mode)) {
2604 TCGv_i64 t0 = tcg_temp_new_i64();
2605 tcg_gen_bswap64_i64(t0, arg1);
2606 tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
2607 tcg_temp_free_i64(t0);
2608 } else
2609 tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
2610 }
2611
/* Emit a D-form integer load: rD = mem[(rA|0) + SIMM]. */
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
2622
/* Emit a D-form load-with-update: as GEN_LD but rA is written back with
 * the EA; rA==0 or rA==rD is invalid.  PPC_64B forms mask the low two
 * displacement bits (DS form). */
#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2642
/* Emit an X-form load-with-update: EA = rA + rB, rA written back;
 * rA==0 or rA==rD is invalid. */
#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2659
/* Emit an X-form indexed load: rD = mem[(rA|0) + rB].  type2 carries the
 * insns_flags2 feature bit; GEN_LDX is the no-extra-feature shorthand. */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE)
2672
/* Emit the full family for one load size: plain, update, indexed-update
 * and indexed forms. */
#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)
2678
/* Instantiate the integer load families. */
/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
2696
2697 static void gen_ld(DisasContext *ctx)
2698 {
2699 TCGv EA;
2700 if (Rc(ctx->opcode)) {
2701 if (unlikely(rA(ctx->opcode) == 0 ||
2702 rA(ctx->opcode) == rD(ctx->opcode))) {
2703 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2704 return;
2705 }
2706 }
2707 gen_set_access_type(ctx, ACCESS_INT);
2708 EA = tcg_temp_new();
2709 gen_addr_imm_index(ctx, EA, 0x03);
2710 if (ctx->opcode & 0x02) {
2711 /* lwa (lwau is undefined) */
2712 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2713 } else {
2714 /* ld - ldu */
2715 gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2716 }
2717 if (Rc(ctx->opcode))
2718 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2719 tcg_temp_free(EA);
2720 }
2721
/* lq: load quadword into an even/odd GPR pair (privileged here;
 * user mode gets a privilege exception). */
static void gen_lq(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
    int ra, rd;
    TCGv EA;

    /* Restore CPU state */
    if (unlikely(ctx->mem_idx == 0)) {
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }
    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    /* rd must be even and must not overlap the base register. */
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (unlikely(ctx->le_mode)) {
        /* Little-endian mode is not handled */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);
    /* Two 64-bit accesses: rd gets the high doubleword, rd+1 the low. */
    gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
    gen_addr_add(ctx, EA, EA, 8);
    gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
    tcg_temp_free(EA);
#endif
}
2756 #endif
2757
/*** Integer store ***/
/* Emit a D-form integer store: mem[(rA|0) + SIMM] = rS. */
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
2769
/* Emit a D-form store-with-update; rA==0 is invalid, rA gets the EA.
 * NOTE: the generated name is built from the *store op* (gen_<stop>u,
 * e.g. gen_st8u) rather than from "name" — a long-standing quirk kept
 * for the opcode tables. */
#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B)                                                      \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    else                                                                      \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2788
/* Emit an X-form store-with-update: EA = rA + rB, rA written back;
 * rA==0 is invalid. */
#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2804
/* Emit an X-form indexed store: mem[(rA|0) + rB] = rS.  type2 carries
 * the insns_flags2 feature bit; GEN_STX is the shorthand without one. */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2)                        \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE)
2817
/* Emit the full family for one store size: plain, update,
 * indexed-update and indexed forms. */
#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)
2823
/* Instantiate the integer store families. */
/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* stdux / stdx (std/stdu are decoded by gen_std below) */
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
2833
/* std/stdu/stq: DS-form 64-bit stores.  The low two opcode bits select
 * the variant: 0x2 is stq (privileged pair store), otherwise bit 0 (Rc)
 * selects the update form. */
static void gen_std(DisasContext *ctx)
{
    int rs;
    TCGv EA;

    rs = rS(ctx->opcode);
    if ((ctx->opcode & 0x3) == 0x2) {
#if defined(CONFIG_USER_ONLY)
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
        /* stq */
        if (unlikely(ctx->mem_idx == 0)) {
            gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return;
        }
        /* rs must be even: the pair rs/rs+1 is stored. */
        if (unlikely(rs & 1)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        if (unlikely(ctx->le_mode)) {
            /* Little-endian mode is not handled */
            gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
            return;
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        gen_addr_add(ctx, EA, EA, 8);
        gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
        tcg_temp_free(EA);
#endif
    } else {
        /* std / stdu */
        if (Rc(ctx->opcode)) {
            /* Update form: rA must be nonzero. */
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new();
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64(ctx, cpu_gpr[rs], EA);
        if (Rc(ctx->opcode))
            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
        tcg_temp_free(EA);
    }
}
2883 #endif
/*** Integer load and store with byte reverse ***/
/* lhbrx: byte-reversed halfword load.  In LE mode the normal load is
 * already reversed relative to guest order, so swap only in BE mode. */
static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
    if (likely(!ctx->le_mode)) {
        tcg_gen_bswap16_tl(arg1, arg1);
    }
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
2894
/* lwbrx: byte-reversed word load (swap only in BE mode, see lhbrx). */
static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
    if (likely(!ctx->le_mode)) {
        tcg_gen_bswap32_tl(arg1, arg1);
    }
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
2904
#if defined(TARGET_PPC64)
/* ldbrx (PPC2_DBRX): byte-reversed doubleword load. */
static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
    if (likely(!ctx->le_mode)) {
        tcg_gen_bswap64_tl(arg1, arg1);
    }
}
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
2915 #endif /* TARGET_PPC64 */
2916
/* sthbrx: byte-reversed halfword store (swap only in BE mode, via a
 * temp so the source register is not clobbered). */
static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (likely(!ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext16u_tl(t0, arg1);
        tcg_gen_bswap16_tl(t0, t0);
        tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
    }
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
2931
/* stwbrx: byte-reversed word store (swap only in BE mode, via a temp). */
static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (likely(!ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, arg1);
        tcg_gen_bswap32_tl(t0, t0);
        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
    }
}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
2946
#if defined(TARGET_PPC64)
/* stdbrx (PPC2_DBRX): byte-reversed doubleword store (swap only in BE
 * mode, via a temp). */
static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
    if (likely(!ctx->le_mode)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_bswap64_tl(t0, arg1);
        tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
        tcg_temp_free(t0);
    } else {
        tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
    }
}
GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX);
2961 #endif /* TARGET_PPC64 */
2962
2963 /*** Integer load and store multiple ***/
2964
/* lmw: load multiple words (rD..r31) — done entirely in a helper. */
static void gen_lmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rD(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_lmw(cpu_env, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
2980
/* stmw: store multiple words (rS..r31) — done entirely in a helper. */
static void gen_stmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rS(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_stmw(cpu_env, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}
2996
/*** Integer load and store strings ***/

/* lswi */
/* PowerPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * In an other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    /* NB=0 encodes a 32-byte transfer. */
    if (nb == 0)
        nb = 32;
    nr = nb / 4;
    /* Reject if rA falls inside the loaded range, accounting for the
     * register-number wrap past r31. */
    if (unlikely(((start + nr) > 32 &&
                  start <= ra && (start + nr - 32) > ra) ||
                 ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(start);
    gen_helper_lsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
3035
/* lswx: load string word indexed; the byte count comes from XER at run
 * time, so the range check is done in the helper. */
static void gen_lswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2, t3;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_const_i32(rD(ctx->opcode));
    t2 = tcg_const_i32(rA(ctx->opcode));
    t3 = tcg_const_i32(rB(ctx->opcode));
    gen_helper_lswx(cpu_env, t0, t1, t2, t3);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
}
3055
/* stswi: store string word immediate; NB=0 encodes 32 bytes. */
static void gen_stswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    if (nb == 0)
        nb = 32;
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
3076
/* stswx: store string word indexed; byte count is XER[25:31] (low
 * 7 bits), extracted at translation into a runtime operand. */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from an helper */
    gen_update_nip(ctx, ctx->nip - 4);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
3096
/*** Memory synchronisation ***/
/* eieio: no code is generated — presumably sufficient because TCG
 * executes guest memory accesses in order within a single CPU; verify
 * if stronger ordering is ever needed for device access emulation. */
static void gen_eieio(DisasContext *ctx)
{
}
3102
/* isync: end the translation block so subsequent instructions are
 * refetched/retranslated. */
static void gen_isync(DisasContext *ctx)
{
    gen_stop_exception(ctx);
}
3108
/* lwarx: load word and set the reservation (address in cpu_reserve,
 * loaded value in env->reserve_val for the stwcx. check). */
static void gen_lwarx(DisasContext *ctx)
{
    TCGv t0;
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    gen_set_access_type(ctx, ACCESS_RES);
    /* Local temp: it must survive the brcond emitted by gen_check_align. */
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x03);
    gen_qemu_ld32u(ctx, gpr, t0);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_temp_free(t0);
}
3123
#if defined(CONFIG_USER_ONLY)
/* User-mode store-conditional: record EA/size/register in env and raise
 * POWERPC_EXCP_STCX so the usermode loop performs the atomic compare-
 * and-store.  ctx->exception is temporarily set to BRANCH so the NIP
 * update is not suppressed, then restored. */
static void gen_conditional_store (DisasContext *ctx, TCGv EA,
                                   int reg, int size)
{
    TCGv t0 = tcg_temp_new();
    uint32_t save_exception = ctx->exception;

    tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
    /* Pack the access size and target register for the exception handler. */
    tcg_gen_movi_tl(t0, (size << 5) | reg);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
    tcg_temp_free(t0);
    gen_update_nip(ctx, ctx->nip-4);
    ctx->exception = POWERPC_EXCP_BRANCH;
    gen_exception(ctx, POWERPC_EXCP_STCX);
    ctx->exception = save_exception;
}
#endif
3141
/* stwcx.: store word conditional.  CR0 = SO, plus EQ if the reservation
 * address matched and the store was performed. */
static void gen_stwcx_(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_RES);
    /* Local temp: live across the branches below. */
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x03);
#if defined(CONFIG_USER_ONLY)
    gen_conditional_store(ctx, t0, rS(ctx->opcode), 4);
#else
    {
        int l1;

        /* Start CR0 with just the SO bit. */
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        l1 = gen_new_label();
        /* Skip the store (EQ stays clear) if the reservation is lost. */
        tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
        gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0);
        gen_set_label(l1);
        /* Any stwcx. clears the reservation. */
        tcg_gen_movi_tl(cpu_reserve, -1);
    }
#endif
    tcg_temp_free(t0);
}
3167
#if defined(TARGET_PPC64)
/* ldarx: 64-bit load and reserve (doubleword-aligned). */
static void gen_ldarx(DisasContext *ctx)
{
    TCGv t0;
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    gen_set_access_type(ctx, ACCESS_RES);
    /* Local temp: it must survive the brcond emitted by gen_check_align. */
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x07);
    gen_qemu_ld64(ctx, gpr, t0);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_temp_free(t0);
}
3183
/* stdcx.: 64-bit store conditional; same CR0 protocol as stwcx. */
static void gen_stdcx_(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_RES);
    /* Local temp: live across the branches below. */
    t0 = tcg_temp_local_new();
    gen_addr_reg_index(ctx, t0);
    gen_check_align(ctx, t0, 0x07);
#if defined(CONFIG_USER_ONLY)
    gen_conditional_store(ctx, t0, rS(ctx->opcode), 8);
#else
    {
        int l1;
        /* Start CR0 with just the SO bit. */
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        l1 = gen_new_label();
        /* Skip the store (EQ stays clear) if the reservation is lost. */
        tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
        gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], t0);
        gen_set_label(l1);
        /* Any stdcx. clears the reservation. */
        tcg_gen_movi_tl(cpu_reserve, -1);
    }
#endif
    tcg_temp_free(t0);
}
3208 #endif /* defined(TARGET_PPC64) */
3209
/* sync */
static void gen_sync(DisasContext *ctx)
{
    /*
     * Deliberately empty: no barrier is emitted for sync.  Presumably
     * safe because TCG executes this vCPU's memory accesses in program
     * order — NOTE(review): confirm this still holds if multi-threaded
     * TCG is enabled for this target.
     */
}
3214
3215 /* wait */
3216 static void gen_wait(DisasContext *ctx)
3217 {
3218 TCGv_i32 t0 = tcg_temp_new_i32();
3219 tcg_gen_st_i32(t0, cpu_env,
3220 -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
3221 tcg_temp_free_i32(t0);
3222 /* Stop translation, as the CPU is supposed to sleep from now */
3223 gen_exception_err(ctx, EXCP_HLT, 1);
3224 }
3225
/*** Floating-point load ***/
/*
 * GEN_LDF: define gen_<name>, a D-form floating-point load.  Raises an
 * FP-unavailable exception when the FPU is disabled; otherwise loads
 * FPR[rD] from EA = (rA|0) + SIMM via gen_qemu_<ldop>.
 */
#define GEN_LDF(name, ldop, opc, type)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
3241
/*
 * GEN_LDUF: define gen_<name>u, the D-form "with update" load variant:
 * like GEN_LDF, but also writes the effective address back to rA.
 * rA == 0 is invalid for update forms and raises an invalid-operand
 * exception.
 */
#define GEN_LDUF(name, ldop, opc, type)                                       \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
3261
/*
 * GEN_LDUXF: define gen_<name>ux, the X-form "with update" load
 * variant: EA = rA + rB, and EA is written back to rA afterwards.
 * rA == 0 is invalid for update forms.
 */
#define GEN_LDUXF(name, ldop, opc, type)                                      \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
3281
/*
 * GEN_LDXF: define gen_<name>x, the plain X-form (indexed) load:
 * EA = (rA|0) + rB, no update of rA.
 */
#define GEN_LDXF(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
3296
/*
 * GEN_LDFS: instantiate all four addressing variants of one FP load:
 * D-form, D-form-with-update, X-form-with-update and X-form.
 */
#define GEN_LDFS(name, ldop, op, type)                                        \
GEN_LDF(name, ldop, op | 0x20, type);                                         \
GEN_LDUF(name, ldop, op | 0x21, type);                                        \
GEN_LDUXF(name, ldop, op | 0x01, type);                                       \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
3302
3303 static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
3304 {
3305 TCGv t0 = tcg_temp_new();
3306 TCGv_i32 t1 = tcg_temp_new_i32();
3307 gen_qemu_ld32u(ctx, t0, arg2);
3308 tcg_gen_trunc_tl_i32(t1, t0);
3309 tcg_temp_free(t0);
3310 gen_helper_float32_to_float64(arg1, cpu_env, t1);
3311 tcg_temp_free_i32(t1);
3312 }
3313
/* lfd lfdu lfdux lfdx: double-precision loads */
GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx: single-precision loads, widened to double */
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
3318
3319 /* lfdp */
3320 static void gen_lfdp(DisasContext *ctx)
3321 {
3322 TCGv EA;
3323 if (unlikely(!ctx->fpu_enabled)) {
3324 gen_exception(ctx, POWERPC_EXCP_FPU);
3325 return;
3326 }
3327 gen_set_access_type(ctx, ACCESS_FLOAT);
3328 EA = tcg_temp_new();
3329 gen_addr_imm_index(ctx, EA, 0); \
3330 if (unlikely(ctx->le_mode)) {
3331 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
3332 tcg_gen_addi_tl(EA, EA, 8);
3333 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
3334 } else {
3335 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
3336 tcg_gen_addi_tl(EA, EA, 8);
3337 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
3338 }
3339 tcg_temp_free(EA);
3340 }
3341
3342 /* lfdpx */
3343 static void gen_lfdpx(DisasContext *ctx)
3344 {
3345 TCGv EA;
3346 if (unlikely(!ctx->fpu_enabled)) {
3347 gen_exception(ctx, POWERPC_EXCP_FPU);
3348 return;
3349 }
3350 gen_set_access_type(ctx, ACCESS_FLOAT);
3351 EA = tcg_temp_new();
3352 gen_addr_reg_index(ctx, EA);
3353 if (unlikely(ctx->le_mode)) {
3354 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
3355 tcg_gen_addi_tl(EA, EA, 8);
3356 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
3357 } else {
3358 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
3359 tcg_gen_addi_tl(EA, EA, 8);
3360 gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
3361 }
3362 tcg_temp_free(EA);
3363 }
3364
3365 /* lfiwax */
3366 static void gen_lfiwax(DisasContext *ctx)
3367 {
3368 TCGv EA;
3369 TCGv t0;
3370 if (unlikely(!ctx->fpu_enabled)) {
3371 gen_exception(ctx, POWERPC_EXCP_FPU);
3372 return;
3373 }
3374 gen_set_access_type(ctx, ACCESS_FLOAT);
3375 EA = tcg_temp_new();
3376 t0 = tcg_temp_new();
3377 gen_addr_reg_index(ctx, EA);
3378 gen_qemu_ld32s(ctx, t0, EA);
3379 tcg_gen_ext_tl_i64(cpu_fpr[rD(ctx->opcode)], t0);
3380 tcg_temp_free(EA);
3381 tcg_temp_free(t0);
3382 }
3383
/*** Floating-point store ***/
/*
 * GEN_STF: define gen_<name>, a D-form floating-point store.  Raises
 * an FP-unavailable exception when the FPU is disabled; otherwise
 * stores FPR[rS] to EA = (rA|0) + SIMM via gen_qemu_<stop>.
 */
#define GEN_STF(name, stop, opc, type)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
3399
/*
 * GEN_STUF: define gen_<name>u, the D-form "with update" store
 * variant: like GEN_STF, but also writes the effective address back to
 * rA.  rA == 0 is invalid for update forms.
 */
#define GEN_STUF(name, stop, opc, type)                                       \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
3419
/*
 * GEN_STUXF: define gen_<name>ux, the X-form "with update" store
 * variant: EA = rA + rB, and EA is written back to rA afterwards.
 * rA == 0 is invalid for update forms.
 */
#define GEN_STUXF(name, stop, opc, type)                                      \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
3439
/*
 * GEN_STXF: define gen_<name>x, the plain X-form (indexed) store:
 * EA = (rA|0) + rB, no update of rA.
 */
#define GEN_STXF(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(!ctx->fpu_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_FPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_FLOAT);                                   \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
3454
/*
 * GEN_STFS: instantiate all four addressing variants of one FP store:
 * D-form, D-form-with-update, X-form-with-update and X-form.
 */
#define GEN_STFS(name, stop, op, type)                                        \
GEN_STF(name, stop, op | 0x20, type);                                         \
GEN_STUF(name, stop, op | 0x21, type);                                        \
GEN_STUXF(name, stop, op | 0x01, type);                                       \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
3460
3461 static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
3462 {
3463 TCGv_i32 t0 = tcg_temp_new_i32();
3464 TCGv t1 = tcg_temp_new();
3465 gen_helper_float64_to_float32(t0, cpu_env, arg1);
3466 tcg_gen_extu_i32_tl(t1, t0);
3467 tcg_temp_free_i32(t0);
3468 gen_qemu_st32(ctx, t1, arg2);
3469 tcg_temp_free(t1);
3470 }
3471
/* stfd stfdu stfdux stfdx: double-precision stores */
GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx: stores narrowed to single precision */
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
3476