target/ppc: Use atomic min/max helpers
target/ppc/translate.c
/*
 * PowerPC emulation for qemu: main translation routines.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"


#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4

/* Include definitions for instruction classes and implementation flags */
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS

#ifdef PPC_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers */

/* global register indexes */
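/*
 * Sizing note: each term below is (number of single-digit registers) *
 * (name length incl. NUL) + (number of double-digit registers) * (name
 * length incl. NUL); e.g. "r0".."r9" need 3 bytes each, "r10".."r31" 4.
 */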
static char cpu_reg_names[10*3 + 22*4 /* GPR */
                          + 10*4 + 22*5 /* SPE GPRh */
                          + 10*4 + 22*5 /* FPR */
                          + 2*(10*6 + 22*7) /* AVRh, AVRl */
                          + 10*5 + 22*6 /* VSR */
                          + 8*5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
static TCGv cpu_reserve_val;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;

#include "exec/gen-icount.h"

void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "fp%d", i);
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, fpr[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;

        snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
        cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;

        snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
        cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
        p += (i < 10) ? 6 : 7;
        cpu_reg_names_size -= (i < 10) ? 6 : 7;
        snprintf(p, cpu_reg_names_size, "vsr%d", i);
        cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUPPCState, vsr[i]), p);
        p += (i < 10) ? 5 : 6;
        cpu_reg_names_size -= (i < 10) ? 5 : 6;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type), "access_type");
}

/* internal defines */
struct DisasContext {
    DisasContextBase base;
    uint32_t opcode;
    uint32_t exception;
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;
    int mem_idx;
    int access_type;
    /* Translation flags */
    TCGMemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C) (!(C)->sf_mode)
#else
# define NARROW_MODE(C) 0
#endif

struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;
#endif
};

static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}

static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_nip, nip);
}

static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
    TCGv_i32 t0, t1;

    /* These are all synchronous exceptions; we set the PC back to
     * the faulting instruction
     */
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->base.pc_next - 4);
    }
    t0 = tcg_const_i32(excp);
    t1 = tcg_const_i32(error);
    gen_helper_raise_exception_err(cpu_env, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    ctx->exception = (excp);
}

static void gen_exception(DisasContext *ctx, uint32_t excp)
{
    TCGv_i32 t0;

    /* These are all synchronous exceptions; we set the PC back to
     * the faulting instruction
     */
    if (ctx->exception == POWERPC_EXCP_NONE) {
        gen_update_nip(ctx, ctx->base.pc_next - 4);
    }
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
                              target_ulong nip)
{
    TCGv_i32 t0;

    gen_update_nip(ctx, nip);
    t0 = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
    ctx->exception = (excp);
}

static void gen_debug_exception(DisasContext *ctx)
{
    TCGv_i32 t0;

    /* These are all synchronous exceptions; we set the PC back to
     * the faulting instruction
     */
    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
        (ctx->exception != POWERPC_EXCP_SYNC)) {
        gen_update_nip(ctx, ctx->base.pc_next);
    }
    t0 = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}

/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->base.pc_next);
    ctx->exception = POWERPC_EXCP_STOP;
}

#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif

#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)

typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;

/* Helpers for priv. check */
#define GEN_PRIV \
    do { \
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
#define CHK_HV GEN_PRIV
#define CHK_SV GEN_PRIV
#define CHK_HVRM GEN_PRIV
#else
#define CHK_HV \
    do { \
        if (unlikely(ctx->pr || !ctx->hv)) { \
            GEN_PRIV; \
        } \
    } while (0)
#define CHK_SV \
    do { \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV; \
        } \
    } while (0)
#define CHK_HVRM \
    do { \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV; \
        } \
    } while (0)
#endif
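/*
 * Summary: CHK_SV rejects problem state (user mode), CHK_HV additionally
 * requires hypervisor state, and CHK_HVRM further requires real mode
 * (data relocation off).  In user-only builds all three simply raise the
 * privileged-instruction program check.
 */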

#define CHK_NONE

/*****************************************************************************/
/* PowerPC instructions table */

#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = 0xff, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = 0xff, \
    .handler = { \
        .inval1 = invl1, \
        .inval2 = invl2, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = 0xff, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = onam, \
    }, \
    .oname = onam, \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = op4, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = stringify(name), \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = op4, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
        .oname = onam, \
    }, \
    .oname = onam, \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = 0xff, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = 0xff, \
    .handler = { \
        .inval1 = invl1, \
        .inval2 = invl2, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = 0xff, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = onam, \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = op4, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = stringify(name), \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
{ \
    .opc1 = op1, \
    .opc2 = op2, \
    .opc3 = op3, \
    .opc4 = op4, \
    .handler = { \
        .inval1 = invl, \
        .type = _typ, \
        .type2 = _typ2, \
        .handler = &gen_##name, \
    }, \
    .oname = onam, \
}
#endif

/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

static opc_handler_t invalid_handler = {
    .inval1 = 0xFFFFFFFF,
    .inval2 = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};

/*** Integer comparison ***/

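/*
 * Compute a CR field image for arg0 <cmp> arg1 branch-free: start from
 * CRF_EQ and let two movconds overwrite it with CRF_LT or CRF_GT, then
 * OR in the current SO bit.
 */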
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, CRF_EQ);
    tcg_gen_movi_tl(t1, CRF_LT);
    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(t1, CRF_GT);
    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
                       t0, arg0, arg1, t1, t0);

    tcg_gen_trunc_tl_i32(t, t0);
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free_i32(t);
}

static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
}

static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}

/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                     0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}

/* cmprb - range comparison: isupper, isalpha, islower */
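/* src2 packs one or two inclusive byte ranges (lowest byte = lower bound,
 * next byte = upper bound); when the 0x00200000 opcode bit (L) is set, a
 * second hi/lo pair from the upper bytes is checked as well. */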
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
    tcg_temp_free_i32(src1);
    tcg_temp_free_i32(src2);
    tcg_temp_free_i32(src2lo);
    tcg_temp_free_i32(src2hi);
}

#if defined(TARGET_PPC64)
/* cmpeqb */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif

/* isel (PowerPC 2.03 specification) */
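/* rD = CR[bi] ? (rA ? GPR[rA] : 0) : GPR[rB].  Each cpu_crf[] holds a 4-bit
 * field image, so the tested bit is mask 0x08 >> (bi & 3) within the field
 * selected by bi >> 2. */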
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}

/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}

/*** Integer arithmetic ***/

static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}

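/*
 * Carry recovery: for an add, the carry into bit k is bit k of
 * (res ^ arg0 ^ arg1); for a subtract arg0 enters inverted, hence eqv.
 * CA32 is therefore just bit 32 of that XOR chain.
 */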
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(cpu_ca32, t0, 32, 1);
    tcg_temp_free(t0);
}

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, cpu_ca);
            }
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else {
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
                tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, 0);
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, cpu_ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
                                add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_tl(const_val); \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
                     cpu_gpr[rA(ctx->opcode)], t0, \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
    tcg_temp_free(t0); \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
/* addze addze. addzeo addzeo. */
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
/* addi */
static void gen_addi(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* li case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm);
    }
}
/* addic addic. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}

/* addis */
static void gen_addis(DisasContext *ctx)
{
    target_long simm = SIMM(ctx->opcode);

    if (rA(ctx->opcode) == 0) {
        /* lis case */
        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
    } else {
        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], simm << 16);
    }
}

/* addpcis */
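/* pc_next has already been advanced past this instruction when the handler
 * runs, which matches addpcis's definition relative to the next instruction
 * address (NIA); DX() reassembles the split immediate field. */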
static void gen_addpcis(DisasContext *ctx)
{
    target_long d = DX(ctx->opcode);

    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16));
}

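/*
 * Integer divide: t2 flags the cases that would trap on the host
 * (INT_MIN / -1 and division by zero).  When set, the divisor is forced
 * to a safe non-zero value before the TCG divide, and t2 doubles as the
 * overflow indication for the OV-setting variants.
 */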
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
#define GEN_DIVE(name, hlpr, compute_ov) \
static void gen_##name(DisasContext *ctx) \
{ \
    TCGv_i32 t0 = tcg_const_i32(compute_ov); \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
    } \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);

#if defined(TARGET_PPC64)
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, ret);
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign, compute_ov); \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif

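/*
 * Modulo shares the divide trap-avoidance trick: an unrepresentable or
 * divide-by-zero case forces the divisor to 1 so the host never faults;
 * the Power ISA leaves the result undefined for these inputs, so any
 * value is acceptable.
 */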
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i32 t2 = tcg_const_i32(1);
        TCGv_i32 t3 = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

#define GEN_INT_ARITH_MODW(name, opc3, sign) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign); \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);

#if defined(TARGET_PPC64)
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t2 = tcg_const_i64(1);
        TCGv_i64 t3 = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

#define GEN_INT_ARITH_MODD(name, opc3, sign) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      sign); \
}

GEN_INT_ARITH_MODD(modud, 0x08, 0);
GEN_INT_ARITH_MODD(modsd, 0x18, 1);
#endif

/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulhwu mulhwu. */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullw mullw. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mullwo mullwo. */
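/* Signed overflow iff the high half of the 64-bit product differs from the
 * sign-extension of the low half, i.e. t1 != (t0 >> 31). */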
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}

#if defined(TARGET_PPC64)
/* mulhd mulhd. */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu mulhdu. */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(lo);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld mulld. */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}

/* mulldo mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif

/* Common subf function */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca]. */
        if (NARROW_MODE(ctx)) {
            /* Caution: a non-obvious corner case of the spec is that we
               must produce the *entire* 64-bit addition, but produce the
               carry into bit 32. */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);        /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, 0);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, 1);
        }
    } else if (add_ca) {
        /* Since we're ignoring carry-out, we can simplify the
           standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
                                 add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv t0 = tcg_const_tl(const_val); \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
                      cpu_gpr[rA(ctx->opcode)], t0, \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
    tcg_temp_free(t0); \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}

/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}

static void gen_neg(DisasContext *ctx)
{
    tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}

/*** Integer logical ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
           cpu_gpr[rB(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

#define GEN_LOGICAL1(name, tcg_op, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
    if (unlikely(Rc(ctx->opcode) != 0)) \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
}

/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);

/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_clzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
    tcg_temp_free_i32(t);

    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cnttzw */
static void gen_cnttzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_ctzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
    tcg_temp_free_i32(t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
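    /* env is embedded in PowerPCCPU (whose first member is the CPUState),
     * so a negative offset from cpu_env reaches CPUState's halted field. */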
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb)
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        else
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        if (unlikely(Rc(ctx->opcode) != 0))
            gen_set_Rc0(ctx, cpu_gpr[ra]);
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /* Pause out of TCG otherwise spin loops with smt_low eat too much
         * CPU and the kernel hangs. This applies to all encodings other
         * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30),
         * and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);

/* xor & xor. */
static void gen_xor(DisasContext *ctx)
{
    /* Optimisation for "set to zero" case */
    if (rS(ctx->opcode) != rB(ctx->opcode))
        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                       cpu_gpr[rB(ctx->opcode)]);
    else
        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* oris */
static void gen_oris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                   uimm << 16);
}

/* xori */
static void gen_xori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}

/* xoris */
static void gen_xoris(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    uimm << 16);
}

/* popcntb: PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif

/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

#if defined(TARGET_PPC64)
/* extsw & extsw. */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);

/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
    tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
    if (unlikely(Rc(ctx->opcode) != 0))
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* cnttzd */
static void gen_cnttzd(DisasContext *ctx)
{
    tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* darn */
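/* L selects the flavour: 0 requests a 32-bit random number, 1 and 2 the
 * 64-bit conditioned/raw forms (CRN/RRN).  Other L values yield -1, which
 * is also the value darn uses to report failure. */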
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l == 0) {
        gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
    } else if (l <= 2) {
        /* Return 64-bit random for both CRN and RRN */
        gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
    } else {
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    }
}
#endif

/*** Integer rotate ***/

/* rlwimi & rlwimi. */
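/* When the rotated source field lands in a single contiguous mask
 * (sh == 31 - me and mb <= me), rlwimi reduces to a plain bit-field
 * deposit into rA. */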
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv t1;

#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        t1 = tcg_temp_new();
        if (mask <= 0xffffffffu) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rlwinm & rlwinm. */
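/* rlwinm has the mirror-image fast paths: a deposit into zeros when the
 * mask ends exactly at the rotate amount, or a plain field extract when
 * me == 31 and the field fits without wrapping. */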
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
#if defined(TARGET_PPC64)
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        if (sh == 0) {
            tcg_gen_andi_tl(t_ra, t_rs, mask);
        } else if (mask <= 0xffffffffu) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_andi_i32(t0, t0, mask);
            tcg_gen_extu_i32_tl(t_ra, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}

/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;

#if defined(TARGET_PPC64)
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

    if (mask <= 0xffffffffu) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
1955 #if defined(TARGET_PPC64)
1956 TCGv_i64 t0 = tcg_temp_new_i64();
1957 tcg_gen_andi_i64(t0, t_rb, 0x1f);
1958 tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
1959 tcg_gen_rotl_i64(t_ra, t_ra, t0);
1960 tcg_temp_free_i64(t0);
1961 #else
1962 g_assert_not_reached();
1963 #endif
1964 }
1965
1966 tcg_gen_andi_tl(t_ra, t_ra, mask);
1967
1968 if (unlikely(Rc(ctx->opcode) != 0)) {
1969 gen_set_Rc0(ctx, t_ra);
1970 }
1971 }
1972
1973 #if defined(TARGET_PPC64)
1974 #define GEN_PPC64_R2(name, opc1, opc2) \
1975 static void glue(gen_, name##0)(DisasContext *ctx) \
1976 { \
1977 gen_##name(ctx, 0); \
1978 } \
1979 \
1980 static void glue(gen_, name##1)(DisasContext *ctx) \
1981 { \
1982 gen_##name(ctx, 1); \
1983 }
1984 #define GEN_PPC64_R4(name, opc1, opc2) \
1985 static void glue(gen_, name##0)(DisasContext *ctx) \
1986 { \
1987 gen_##name(ctx, 0, 0); \
1988 } \
1989 \
1990 static void glue(gen_, name##1)(DisasContext *ctx) \
1991 { \
1992 gen_##name(ctx, 0, 1); \
1993 } \
1994 \
1995 static void glue(gen_, name##2)(DisasContext *ctx) \
1996 { \
1997 gen_##name(ctx, 1, 0); \
1998 } \
1999 \
2000 static void glue(gen_, name##3)(DisasContext *ctx) \
2001 { \
2002 gen_##name(ctx, 1, 1); \
2003 }
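/*
 * Example (sketch): GEN_PPC64_R4(rldicl, ...) below expands to four thin
 * wrappers, gen_rldicl0 .. gen_rldicl3, one per combination of the high
 * (split) bits of sh and mb/me that the opcode table dispatches on.
 */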
2004
2005 static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
2006 {
2007 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2008 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2009 int len = me - mb + 1;
2010 int rsh = (64 - sh) & 63;
2011
2012 if (sh != 0 && len > 0 && me == (63 - sh)) {
2013 tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
2014 } else if (me == 63 && rsh + len <= 64) {
2015 tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
2016 } else {
2017 tcg_gen_rotli_tl(t_ra, t_rs, sh);
2018 tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2019 }
2020 if (unlikely(Rc(ctx->opcode) != 0)) {
2021 gen_set_Rc0(ctx, t_ra);
2022 }
2023 }
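/*
 * Worked examples (sketch): rldicl ra,rs,0,48 gives sh=0, mb=48, me=63,
 * so len=16 and rsh=0, and the extract path above is a plain 16-bit
 * zero-extension.  rldicr ra,rs,16,47 gives me=47=63-16, so the
 * deposit_z path shifts rs left by 16 and clears the rest in one op.
 */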
2024
2025 /* rldicl - rldicl. */
2026 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2027 {
2028 uint32_t sh, mb;
2029
2030 sh = SH(ctx->opcode) | (shn << 5);
2031 mb = MB(ctx->opcode) | (mbn << 5);
2032 gen_rldinm(ctx, mb, 63, sh);
2033 }
2034 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2035
2036 /* rldicr - rldicr. */
2037 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2038 {
2039 uint32_t sh, me;
2040
2041 sh = SH(ctx->opcode) | (shn << 5);
2042 me = MB(ctx->opcode) | (men << 5);
2043 gen_rldinm(ctx, 0, me, sh);
2044 }
2045 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2046
2047 /* rldic - rldic. */
2048 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2049 {
2050 uint32_t sh, mb;
2051
2052 sh = SH(ctx->opcode) | (shn << 5);
2053 mb = MB(ctx->opcode) | (mbn << 5);
2054 gen_rldinm(ctx, mb, 63 - sh, sh);
2055 }
2056 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2057
2058 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2059 {
2060 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2061 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2062 TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2063 TCGv t0;
2064
2065 t0 = tcg_temp_new();
2066 tcg_gen_andi_tl(t0, t_rb, 0x3f);
2067 tcg_gen_rotl_tl(t_ra, t_rs, t0);
2068 tcg_temp_free(t0);
2069
2070 tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2071 if (unlikely(Rc(ctx->opcode) != 0)) {
2072 gen_set_Rc0(ctx, t_ra);
2073 }
2074 }
2075
2076 /* rldcl - rldcl. */
2077 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2078 {
2079 uint32_t mb;
2080
2081 mb = MB(ctx->opcode) | (mbn << 5);
2082 gen_rldnm(ctx, mb, 63);
2083 }
2084 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2085
2086 /* rldcr - rldcr. */
2087 static inline void gen_rldcr(DisasContext *ctx, int men)
2088 {
2089 uint32_t me;
2090
2091 me = MB(ctx->opcode) | (men << 5);
2092 gen_rldnm(ctx, 0, me);
2093 }
2094 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2095
2096 /* rldimi - rldimi. */
2097 static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
2098 {
2099 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2100 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2101 uint32_t sh = SH(ctx->opcode) | (shn << 5);
2102 uint32_t mb = MB(ctx->opcode) | (mbn << 5);
2103 uint32_t me = 63 - sh;
2104
2105 if (mb <= me) {
2106 tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
2107 } else {
2108 target_ulong mask = MASK(mb, me);
2109 TCGv t1 = tcg_temp_new();
2110
2111 tcg_gen_rotli_tl(t1, t_rs, sh);
2112 tcg_gen_andi_tl(t1, t1, mask);
2113 tcg_gen_andi_tl(t_ra, t_ra, ~mask);
2114 tcg_gen_or_tl(t_ra, t_ra, t1);
2115 tcg_temp_free(t1);
2116 }
2117 if (unlikely(Rc(ctx->opcode) != 0)) {
2118 gen_set_Rc0(ctx, t_ra);
2119 }
2120 }
2121 GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2122 #endif
2123
2124 /*** Integer shift ***/
2125
2126 /* slw & slw. */
2127 static void gen_slw(DisasContext *ctx)
2128 {
2129 TCGv t0, t1;
2130
2131 t0 = tcg_temp_new();
2132 /* AND rS with a mask that is 0 when rB >= 0x20 */
2133 #if defined(TARGET_PPC64)
2134 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2135 tcg_gen_sari_tl(t0, t0, 0x3f);
2136 #else
2137 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2138 tcg_gen_sari_tl(t0, t0, 0x1f);
2139 #endif
2140 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2141 t1 = tcg_temp_new();
2142 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2143 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2144 tcg_temp_free(t1);
2145 tcg_temp_free(t0);
2146 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2147 if (unlikely(Rc(ctx->opcode) != 0))
2148 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2149 }
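/*
 * Sketch of the masking trick above (host-side, 64-bit case): shifting
 * rB left by 58 moves bit 0x20 into the sign bit, so the arithmetic
 * shift right by 63 yields all-ones exactly when the shift amount is
 * >= 32, and the andc then forces the result to zero.
 */
static inline uint64_t slw_model(uint64_t rs, uint64_t rb)
{
    uint64_t kill = (uint64_t)((int64_t)(rb << 58) >> 63); /* -1 or 0 */
    uint64_t val = rs & ~kill;
    return (uint32_t)(val << (rb & 0x1f));
}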
2150
2151 /* sraw & sraw. */
2152 static void gen_sraw(DisasContext *ctx)
2153 {
2154 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2155 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2156 if (unlikely(Rc(ctx->opcode) != 0))
2157 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2158 }
2159
2160 /* srawi & srawi. */
2161 static void gen_srawi(DisasContext *ctx)
2162 {
2163 int sh = SH(ctx->opcode);
2164 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2165 TCGv src = cpu_gpr[rS(ctx->opcode)];
2166 if (sh == 0) {
2167 tcg_gen_ext32s_tl(dst, src);
2168 tcg_gen_movi_tl(cpu_ca, 0);
2169 if (is_isa300(ctx)) {
2170 tcg_gen_movi_tl(cpu_ca32, 0);
2171 }
2172 } else {
2173 TCGv t0;
2174 tcg_gen_ext32s_tl(dst, src);
2175 tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
2176 t0 = tcg_temp_new();
2177 tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
2178 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2179 tcg_temp_free(t0);
2180 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2181 if (is_isa300(ctx)) {
2182 tcg_gen_mov_tl(cpu_ca32, cpu_ca);
2183 }
2184 tcg_gen_sari_tl(dst, dst, sh);
2185 }
2186 if (unlikely(Rc(ctx->opcode) != 0)) {
2187 gen_set_Rc0(ctx, dst);
2188 }
2189 }
2190
2191 /* srw & srw. */
2192 static void gen_srw(DisasContext *ctx)
2193 {
2194 TCGv t0, t1;
2195
2196 t0 = tcg_temp_new();
2197 /* AND rS with a mask that is 0 when rB >= 0x20 */
2198 #if defined(TARGET_PPC64)
2199 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2200 tcg_gen_sari_tl(t0, t0, 0x3f);
2201 #else
2202 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2203 tcg_gen_sari_tl(t0, t0, 0x1f);
2204 #endif
2205 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2206 tcg_gen_ext32u_tl(t0, t0);
2207 t1 = tcg_temp_new();
2208 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2209 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2210 tcg_temp_free(t1);
2211 tcg_temp_free(t0);
2212 if (unlikely(Rc(ctx->opcode) != 0))
2213 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2214 }
2215
2216 #if defined(TARGET_PPC64)
2217 /* sld & sld. */
2218 static void gen_sld(DisasContext *ctx)
2219 {
2220 TCGv t0, t1;
2221
2222 t0 = tcg_temp_new();
2223 /* AND rS with a mask that is 0 when rB >= 0x40 */
2224 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2225 tcg_gen_sari_tl(t0, t0, 0x3f);
2226 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2227 t1 = tcg_temp_new();
2228 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2229 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2230 tcg_temp_free(t1);
2231 tcg_temp_free(t0);
2232 if (unlikely(Rc(ctx->opcode) != 0))
2233 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2234 }
2235
2236 /* srad & srad. */
2237 static void gen_srad(DisasContext *ctx)
2238 {
2239 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2240 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2241 if (unlikely(Rc(ctx->opcode) != 0))
2242 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2243 }
2244 /* sradi & sradi. */
2245 static inline void gen_sradi(DisasContext *ctx, int n)
2246 {
2247 int sh = SH(ctx->opcode) + (n << 5);
2248 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2249 TCGv src = cpu_gpr[rS(ctx->opcode)];
2250 if (sh == 0) {
2251 tcg_gen_mov_tl(dst, src);
2252 tcg_gen_movi_tl(cpu_ca, 0);
2253 if (is_isa300(ctx)) {
2254 tcg_gen_movi_tl(cpu_ca32, 0);
2255 }
2256 } else {
2257 TCGv t0;
2258 tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
2259 t0 = tcg_temp_new();
2260 tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
2261 tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2262 tcg_temp_free(t0);
2263 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2264 if (is_isa300(ctx)) {
2265 tcg_gen_mov_tl(cpu_ca32, cpu_ca);
2266 }
2267 tcg_gen_sari_tl(dst, src, sh);
2268 }
2269 if (unlikely(Rc(ctx->opcode) != 0)) {
2270 gen_set_Rc0(ctx, dst);
2271 }
2272 }
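/*
 * Sketch of the CA computation in the sh != 0 branch above: the carry
 * out of an algebraic right shift is set iff the source is negative and
 * at least one 1-bit was shifted out.
 */
static inline bool sradi_carry_model(int64_t src, unsigned sh)
{
    return src < 0 && (src & (((uint64_t)1 << sh) - 1)) != 0;
}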
2273
2274 static void gen_sradi0(DisasContext *ctx)
2275 {
2276 gen_sradi(ctx, 0);
2277 }
2278
2279 static void gen_sradi1(DisasContext *ctx)
2280 {
2281 gen_sradi(ctx, 1);
2282 }
2283
2284 /* extswsli & extswsli. */
2285 static inline void gen_extswsli(DisasContext *ctx, int n)
2286 {
2287 int sh = SH(ctx->opcode) + (n << 5);
2288 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2289 TCGv src = cpu_gpr[rS(ctx->opcode)];
2290
2291 tcg_gen_ext32s_tl(dst, src);
2292 tcg_gen_shli_tl(dst, dst, sh);
2293 if (unlikely(Rc(ctx->opcode) != 0)) {
2294 gen_set_Rc0(ctx, dst);
2295 }
2296 }
2297
2298 static void gen_extswsli0(DisasContext *ctx)
2299 {
2300 gen_extswsli(ctx, 0);
2301 }
2302
2303 static void gen_extswsli1(DisasContext *ctx)
2304 {
2305 gen_extswsli(ctx, 1);
2306 }
2307
2308 /* srd & srd. */
2309 static void gen_srd(DisasContext *ctx)
2310 {
2311 TCGv t0, t1;
2312
2313 t0 = tcg_temp_new();
2314 /* AND rS with a mask that is 0 when rB >= 0x40 */
2315 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2316 tcg_gen_sari_tl(t0, t0, 0x3f);
2317 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2318 t1 = tcg_temp_new();
2319 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2320 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2321 tcg_temp_free(t1);
2322 tcg_temp_free(t0);
2323 if (unlikely(Rc(ctx->opcode) != 0))
2324 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2325 }
2326 #endif
2327
2328 /*** Addressing modes ***/
2329 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
2330 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2331 target_long maskl)
2332 {
2333 target_long simm = SIMM(ctx->opcode);
2334
2335 simm &= ~maskl;
2336 if (rA(ctx->opcode) == 0) {
2337 if (NARROW_MODE(ctx)) {
2338 simm = (uint32_t)simm;
2339 }
2340 tcg_gen_movi_tl(EA, simm);
2341 } else if (likely(simm != 0)) {
2342 tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2343 if (NARROW_MODE(ctx)) {
2344 tcg_gen_ext32u_tl(EA, EA);
2345 }
2346 } else {
2347 if (NARROW_MODE(ctx)) {
2348 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2349 } else {
2350 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2351 }
2352 }
2353 }
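/*
 * Host-side sketch of the EA computation above (names illustrative): in
 * narrow (32-bit) mode on a 64-bit target the effective address is
 * truncated to 32 bits at every step.
 */
static inline uint64_t ea_imm_index_model(uint64_t ra_or_zero,
                                          int64_t simm, bool narrow)
{
    uint64_t ea = ra_or_zero + (uint64_t)simm;
    return narrow ? (uint32_t)ea : ea;
}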
2354
2355 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2356 {
2357 if (rA(ctx->opcode) == 0) {
2358 if (NARROW_MODE(ctx)) {
2359 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2360 } else {
2361 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2362 }
2363 } else {
2364 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2365 if (NARROW_MODE(ctx)) {
2366 tcg_gen_ext32u_tl(EA, EA);
2367 }
2368 }
2369 }
2370
2371 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2372 {
2373 if (rA(ctx->opcode) == 0) {
2374 tcg_gen_movi_tl(EA, 0);
2375 } else if (NARROW_MODE(ctx)) {
2376 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2377 } else {
2378 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2379 }
2380 }
2381
2382 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
2383 target_long val)
2384 {
2385 tcg_gen_addi_tl(ret, arg1, val);
2386 if (NARROW_MODE(ctx)) {
2387 tcg_gen_ext32u_tl(ret, ret);
2388 }
2389 }
2390
2391 static inline void gen_align_no_le(DisasContext *ctx)
2392 {
2393 gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
2394 (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
2395 }
2396
2397 /*** Integer load ***/
2398 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
2399 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
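/*
 * Sketch of the intent: ctx->default_tcg_memop_mask is MO_BE or MO_LE
 * according to the current guest endianness, so for example
 *   DEF_MEMOP(MO_UW)   - a 16-bit access in guest byte order
 *   BSWAP_MEMOP(MO_UW) - the same access with the bytes reversed,
 *                        as used by lhbrx/sthbrx below
 */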
2400
2401 #define GEN_QEMU_LOAD_TL(ldop, op) \
2402 static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
2403 TCGv val, \
2404 TCGv addr) \
2405 { \
2406 tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
2407 }
2408
2409 GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
2410 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
2411 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
2412 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
2413 GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
2414
2415 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
2416 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
2417
2418 #define GEN_QEMU_LOAD_64(ldop, op) \
2419 static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
2420 TCGv_i64 val, \
2421 TCGv addr) \
2422 { \
2423 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
2424 }
2425
2426 GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
2427 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
2428 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
2429 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
2430 GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
2431
2432 #if defined(TARGET_PPC64)
2433 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
2434 #endif
2435
2436 #define GEN_QEMU_STORE_TL(stop, op) \
2437 static void glue(gen_qemu_, stop)(DisasContext *ctx, \
2438 TCGv val, \
2439 TCGv addr) \
2440 { \
2441 tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
2442 }
2443
2444 GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
2445 GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
2446 GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
2447
2448 GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
2449 GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
2450
2451 #define GEN_QEMU_STORE_64(stop, op) \
2452 static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
2453 TCGv_i64 val, \
2454 TCGv addr) \
2455 { \
2456 tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
2457 }
2458
2459 GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
2460 GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
2461 GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
2462 GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
2463
2464 #if defined(TARGET_PPC64)
2465 GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
2466 #endif
2467
2468 #define GEN_LD(name, ldop, opc, type) \
2469 static void glue(gen_, name)(DisasContext *ctx) \
2470 { \
2471 TCGv EA; \
2472 gen_set_access_type(ctx, ACCESS_INT); \
2473 EA = tcg_temp_new(); \
2474 gen_addr_imm_index(ctx, EA, 0); \
2475 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2476 tcg_temp_free(EA); \
2477 }
2478
2479 #define GEN_LDU(name, ldop, opc, type) \
2480 static void glue(gen_, name##u)(DisasContext *ctx) \
2481 { \
2482 TCGv EA; \
2483 if (unlikely(rA(ctx->opcode) == 0 || \
2484 rA(ctx->opcode) == rD(ctx->opcode))) { \
2485 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2486 return; \
2487 } \
2488 gen_set_access_type(ctx, ACCESS_INT); \
2489 EA = tcg_temp_new(); \
2490 if (type == PPC_64B) \
2491 gen_addr_imm_index(ctx, EA, 0x03); \
2492 else \
2493 gen_addr_imm_index(ctx, EA, 0); \
2494 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2495 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2496 tcg_temp_free(EA); \
2497 }
2498
2499 #define GEN_LDUX(name, ldop, opc2, opc3, type) \
2500 static void glue(gen_, name##ux)(DisasContext *ctx) \
2501 { \
2502 TCGv EA; \
2503 if (unlikely(rA(ctx->opcode) == 0 || \
2504 rA(ctx->opcode) == rD(ctx->opcode))) { \
2505 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2506 return; \
2507 } \
2508 gen_set_access_type(ctx, ACCESS_INT); \
2509 EA = tcg_temp_new(); \
2510 gen_addr_reg_index(ctx, EA); \
2511 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2512 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2513 tcg_temp_free(EA); \
2514 }
2515
2516 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
2517 static void glue(gen_, name##x)(DisasContext *ctx) \
2518 { \
2519 TCGv EA; \
2520 chk; \
2521 gen_set_access_type(ctx, ACCESS_INT); \
2522 EA = tcg_temp_new(); \
2523 gen_addr_reg_index(ctx, EA); \
2524 gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
2525 tcg_temp_free(EA); \
2526 }
2527
2528 #define GEN_LDX(name, ldop, opc2, opc3, type) \
2529 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)
2530
2531 #define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \
2532 GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
2533
2534 #define GEN_LDS(name, ldop, op, type) \
2535 GEN_LD(name, ldop, op | 0x20, type); \
2536 GEN_LDU(name, ldop, op | 0x21, type); \
2537 GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
2538 GEN_LDX(name, ldop, 0x17, op | 0x00, type)
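/*
 * Example (sketch): GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) below expands
 * to gen_lbz, gen_lbzu, gen_lbzux and gen_lbzx, i.e. the base, update,
 * indexed-update and indexed forms all sharing the ld8u access.
 */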
2539
2540 /* lbz lbzu lbzux lbzx */
2541 GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
2542 /* lha lhau lhaux lhax */
2543 GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
2544 /* lhz lhzu lhzux lhzx */
2545 GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
2546 /* lwz lwzu lwzux lwzx */
2547 GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2548 #if defined(TARGET_PPC64)
2549 /* lwaux */
2550 GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
2551 /* lwax */
2552 GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
2553 /* ldux */
2554 GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
2555 /* ldx */
2556 GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
2557
2558 /* CI load/store variants */
2559 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
2560 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
2561 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
2562 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
2563
2564 static void gen_ld(DisasContext *ctx)
2565 {
2566 TCGv EA;
2567 if (Rc(ctx->opcode)) {
2568 if (unlikely(rA(ctx->opcode) == 0 ||
2569 rA(ctx->opcode) == rD(ctx->opcode))) {
2570 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2571 return;
2572 }
2573 }
2574 gen_set_access_type(ctx, ACCESS_INT);
2575 EA = tcg_temp_new();
2576 gen_addr_imm_index(ctx, EA, 0x03);
2577 if (ctx->opcode & 0x02) {
2578 /* lwa (lwau is undefined) */
2579 gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2580 } else {
2581 /* ld - ldu */
2582 gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2583 }
2584 if (Rc(ctx->opcode))
2585 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2586 tcg_temp_free(EA);
2587 }
2588
2589 /* lq */
2590 static void gen_lq(DisasContext *ctx)
2591 {
2592 int ra, rd;
2593 TCGv EA, hi, lo;
2594
2595 /* lq is a legal user mode instruction starting in ISA 2.07 */
2596 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2597 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2598
2599 if (!legal_in_user_mode && ctx->pr) {
2600 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2601 return;
2602 }
2603
2604 if (!le_is_supported && ctx->le_mode) {
2605 gen_align_no_le(ctx);
2606 return;
2607 }
2608 ra = rA(ctx->opcode);
2609 rd = rD(ctx->opcode);
2610 if (unlikely((rd & 1) || rd == ra)) {
2611 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2612 return;
2613 }
2614
2615 gen_set_access_type(ctx, ACCESS_INT);
2616 EA = tcg_temp_new();
2617 gen_addr_imm_index(ctx, EA, 0x0F);
2618
2619 /* Note that the low part is always in RD+1, even in LE mode. */
2620 lo = cpu_gpr[rd + 1];
2621 hi = cpu_gpr[rd];
2622
2623 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2624 #ifdef CONFIG_ATOMIC128
2625 TCGv_i32 oi = tcg_temp_new_i32();
2626 if (ctx->le_mode) {
2627 tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
2628 gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
2629 } else {
2630 tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
2631 gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
2632 }
2633 tcg_temp_free_i32(oi);
2634 tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
2635 #else
2636 /* Restart with exclusive lock. */
2637 gen_helper_exit_atomic(cpu_env);
2638 ctx->base.is_jmp = DISAS_NORETURN;
2639 #endif
2640 } else if (ctx->le_mode) {
2641 tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
2642 gen_addr_add(ctx, EA, EA, 8);
2643 tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
2644 } else {
2645 tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
2646 gen_addr_add(ctx, EA, EA, 8);
2647 tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
2648 }
2649 tcg_temp_free(EA);
2650 }
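/*
 * Host-side model of the non-parallel lq path above (names illustrative;
 * mem[] holds the two doublewords already in host order): whatever the
 * endianness, the low doubleword always lands in RD+1.
 */
static inline void lq_model(uint64_t *hi, uint64_t *lo,
                            const uint64_t mem[2], bool le)
{
    *lo = le ? mem[0] : mem[1];
    *hi = le ? mem[1] : mem[0];
}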
2651 #endif
2652
2653 /*** Integer store ***/
2654 #define GEN_ST(name, stop, opc, type) \
2655 static void glue(gen_, name)(DisasContext *ctx) \
2656 { \
2657 TCGv EA; \
2658 gen_set_access_type(ctx, ACCESS_INT); \
2659 EA = tcg_temp_new(); \
2660 gen_addr_imm_index(ctx, EA, 0); \
2661 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2662 tcg_temp_free(EA); \
2663 }
2664
2665 #define GEN_STU(name, stop, opc, type) \
2666 static void glue(gen_, stop##u)(DisasContext *ctx) \
2667 { \
2668 TCGv EA; \
2669 if (unlikely(rA(ctx->opcode) == 0)) { \
2670 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2671 return; \
2672 } \
2673 gen_set_access_type(ctx, ACCESS_INT); \
2674 EA = tcg_temp_new(); \
2675 if (type == PPC_64B) \
2676 gen_addr_imm_index(ctx, EA, 0x03); \
2677 else \
2678 gen_addr_imm_index(ctx, EA, 0); \
2679 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2680 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2681 tcg_temp_free(EA); \
2682 }
2683
2684 #define GEN_STUX(name, stop, opc2, opc3, type) \
2685 static void glue(gen_, name##ux)(DisasContext *ctx) \
2686 { \
2687 TCGv EA; \
2688 if (unlikely(rA(ctx->opcode) == 0)) { \
2689 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
2690 return; \
2691 } \
2692 gen_set_access_type(ctx, ACCESS_INT); \
2693 EA = tcg_temp_new(); \
2694 gen_addr_reg_index(ctx, EA); \
2695 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2696 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
2697 tcg_temp_free(EA); \
2698 }
2699
2700 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
2701 static void glue(gen_, name##x)(DisasContext *ctx) \
2702 { \
2703 TCGv EA; \
2704 chk; \
2705 gen_set_access_type(ctx, ACCESS_INT); \
2706 EA = tcg_temp_new(); \
2707 gen_addr_reg_index(ctx, EA); \
2708 gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
2709 tcg_temp_free(EA); \
2710 }
2711 #define GEN_STX(name, stop, opc2, opc3, type) \
2712 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
2713
2714 #define GEN_STX_HVRM(name, stop, opc2, opc3, type) \
2715 GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
2716
2717 #define GEN_STS(name, stop, op, type) \
2718 GEN_ST(name, stop, op | 0x20, type); \
2719 GEN_STU(name, stop, op | 0x21, type); \
2720 GEN_STUX(name, stop, 0x17, op | 0x01, type); \
2721 GEN_STX(name, stop, 0x17, op | 0x00, type)
2722
2723 /* stb stbu stbux stbx */
2724 GEN_STS(stb, st8, 0x06, PPC_INTEGER);
2725 /* sth sthu sthux sthx */
2726 GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
2727 /* stw stwu stwux stwx */
2728 GEN_STS(stw, st32, 0x04, PPC_INTEGER);
2729 #if defined(TARGET_PPC64)
2730 GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
2731 GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
2732 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
2733 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
2734 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
2735 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
2736
2737 static void gen_std(DisasContext *ctx)
2738 {
2739 int rs;
2740 TCGv EA;
2741
2742 rs = rS(ctx->opcode);
2743 if ((ctx->opcode & 0x3) == 0x2) { /* stq */
2744 bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2745 bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2746 TCGv hi, lo;
2747
2748 if (!(ctx->insns_flags & PPC_64BX)) {
2749 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
     return;
2750 }
2751
2752 if (!legal_in_user_mode && ctx->pr) {
2753 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2754 return;
2755 }
2756
2757 if (!le_is_supported && ctx->le_mode) {
2758 gen_align_no_le(ctx);
2759 return;
2760 }
2761
2762 if (unlikely(rs & 1)) {
2763 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2764 return;
2765 }
2766 gen_set_access_type(ctx, ACCESS_INT);
2767 EA = tcg_temp_new();
2768 gen_addr_imm_index(ctx, EA, 0x03);
2769
2770 /* Note that the low part is always in RS+1, even in LE mode. */
2771 lo = cpu_gpr[rs + 1];
2772 hi = cpu_gpr[rs];
2773
2774 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2775 #ifdef CONFIG_ATOMIC128
2776 TCGv_i32 oi = tcg_temp_new_i32();
2777 if (ctx->le_mode) {
2778 tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
2779 gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
2780 } else {
2781 tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
2782 gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
2783 }
2784 tcg_temp_free_i32(oi);
2785 #else
2786 /* Restart with exclusive lock. */
2787 gen_helper_exit_atomic(cpu_env);
2788 ctx->base.is_jmp = DISAS_NORETURN;
2789 #endif
2790 } else if (ctx->le_mode) {
2791 tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
2792 gen_addr_add(ctx, EA, EA, 8);
2793 tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
2794 } else {
2795 tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
2796 gen_addr_add(ctx, EA, EA, 8);
2797 tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
2798 }
2799 tcg_temp_free(EA);
2800 } else {
2801 /* std / stdu */
2802 if (Rc(ctx->opcode)) {
2803 if (unlikely(rA(ctx->opcode) == 0)) {
2804 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2805 return;
2806 }
2807 }
2808 gen_set_access_type(ctx, ACCESS_INT);
2809 EA = tcg_temp_new();
2810 gen_addr_imm_index(ctx, EA, 0x03);
2811 gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
2812 if (Rc(ctx->opcode))
2813 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2814 tcg_temp_free(EA);
2815 }
2816 }
2817 #endif
2818 /*** Integer load and store with byte reverse ***/
2819
2820 /* lhbrx */
2821 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
2822
2823 /* lwbrx */
2824 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
2825
2826 #if defined(TARGET_PPC64)
2827 /* ldbrx */
2828 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
2829 /* stdbrx */
2830 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
2831 #endif /* TARGET_PPC64 */
2832
2833 /* sthbrx */
2834 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
2835 /* stwbrx */
2836 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
2837
2838 /*** Integer load and store multiple ***/
2839
2840 /* lmw */
2841 static void gen_lmw(DisasContext *ctx)
2842 {
2843 TCGv t0;
2844 TCGv_i32 t1;
2845
2846 if (ctx->le_mode) {
2847 gen_align_no_le(ctx);
2848 return;
2849 }
2850 gen_set_access_type(ctx, ACCESS_INT);
2851 t0 = tcg_temp_new();
2852 t1 = tcg_const_i32(rD(ctx->opcode));
2853 gen_addr_imm_index(ctx, t0, 0);
2854 gen_helper_lmw(cpu_env, t0, t1);
2855 tcg_temp_free(t0);
2856 tcg_temp_free_i32(t1);
2857 }
2858
2859 /* stmw */
2860 static void gen_stmw(DisasContext *ctx)
2861 {
2862 TCGv t0;
2863 TCGv_i32 t1;
2864
2865 if (ctx->le_mode) {
2866 gen_align_no_le(ctx);
2867 return;
2868 }
2869 gen_set_access_type(ctx, ACCESS_INT);
2870 t0 = tcg_temp_new();
2871 t1 = tcg_const_i32(rS(ctx->opcode));
2872 gen_addr_imm_index(ctx, t0, 0);
2873 gen_helper_stmw(cpu_env, t0, t1);
2874 tcg_temp_free(t0);
2875 tcg_temp_free_i32(t1);
2876 }
2877
2878 /*** Integer load and store strings ***/
2879
2880 /* lswi */
2881 /* The PowerPC32 specification says we must generate an exception if
2882 * rA is in the range of registers to be loaded.
2883 * On the other hand, IBM says this is valid, but rA won't be loaded.
2884 * For now, I'll follow the spec...
2885 */
2886 static void gen_lswi(DisasContext *ctx)
2887 {
2888 TCGv t0;
2889 TCGv_i32 t1, t2;
2890 int nb = NB(ctx->opcode);
2891 int start = rD(ctx->opcode);
2892 int ra = rA(ctx->opcode);
2893 int nr;
2894
2895 if (ctx->le_mode) {
2896 gen_align_no_le(ctx);
2897 return;
2898 }
2899 if (nb == 0)
2900 nb = 32;
2901 nr = DIV_ROUND_UP(nb, 4);
2902 if (unlikely(lsw_reg_in_range(start, nr, ra))) {
2903 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
2904 return;
2905 }
2906 gen_set_access_type(ctx, ACCESS_INT);
2907 t0 = tcg_temp_new();
2908 gen_addr_register(ctx, t0);
2909 t1 = tcg_const_i32(nb);
2910 t2 = tcg_const_i32(start);
2911 gen_helper_lsw(cpu_env, t0, t1, t2);
2912 tcg_temp_free(t0);
2913 tcg_temp_free_i32(t1);
2914 tcg_temp_free_i32(t2);
2915 }
2916
2917 /* lswx */
2918 static void gen_lswx(DisasContext *ctx)
2919 {
2920 TCGv t0;
2921 TCGv_i32 t1, t2, t3;
2922
2923 if (ctx->le_mode) {
2924 gen_align_no_le(ctx);
2925 return;
2926 }
2927 gen_set_access_type(ctx, ACCESS_INT);
2928 t0 = tcg_temp_new();
2929 gen_addr_reg_index(ctx, t0);
2930 t1 = tcg_const_i32(rD(ctx->opcode));
2931 t2 = tcg_const_i32(rA(ctx->opcode));
2932 t3 = tcg_const_i32(rB(ctx->opcode));
2933 gen_helper_lswx(cpu_env, t0, t1, t2, t3);
2934 tcg_temp_free(t0);
2935 tcg_temp_free_i32(t1);
2936 tcg_temp_free_i32(t2);
2937 tcg_temp_free_i32(t3);
2938 }
2939
2940 /* stswi */
2941 static void gen_stswi(DisasContext *ctx)
2942 {
2943 TCGv t0;
2944 TCGv_i32 t1, t2;
2945 int nb = NB(ctx->opcode);
2946
2947 if (ctx->le_mode) {
2948 gen_align_no_le(ctx);
2949 return;
2950 }
2951 gen_set_access_type(ctx, ACCESS_INT);
2952 t0 = tcg_temp_new();
2953 gen_addr_register(ctx, t0);
2954 if (nb == 0)
2955 nb = 32;
2956 t1 = tcg_const_i32(nb);
2957 t2 = tcg_const_i32(rS(ctx->opcode));
2958 gen_helper_stsw(cpu_env, t0, t1, t2);
2959 tcg_temp_free(t0);
2960 tcg_temp_free_i32(t1);
2961 tcg_temp_free_i32(t2);
2962 }
2963
2964 /* stswx */
2965 static void gen_stswx(DisasContext *ctx)
2966 {
2967 TCGv t0;
2968 TCGv_i32 t1, t2;
2969
2970 if (ctx->le_mode) {
2971 gen_align_no_le(ctx);
2972 return;
2973 }
2974 gen_set_access_type(ctx, ACCESS_INT);
2975 t0 = tcg_temp_new();
2976 gen_addr_reg_index(ctx, t0);
2977 t1 = tcg_temp_new_i32();
2978 tcg_gen_trunc_tl_i32(t1, cpu_xer);
2979 tcg_gen_andi_i32(t1, t1, 0x7F);
2980 t2 = tcg_const_i32(rS(ctx->opcode));
2981 gen_helper_stsw(cpu_env, t0, t1, t2);
2982 tcg_temp_free(t0);
2983 tcg_temp_free_i32(t1);
2984 tcg_temp_free_i32(t2);
2985 }
2986
2987 /*** Memory synchronisation ***/
2988 /* eieio */
2989 static void gen_eieio(DisasContext *ctx)
2990 {
2991 TCGBar bar = TCG_MO_LD_ST;
2992
2993 /*
2994 * POWER9 has an eieio instruction variant using bit 6 as a hint to
2995 * tell the CPU it is a store-forwarding barrier.
2996 */
2997 if (ctx->opcode & 0x2000000) {
2998 /*
2999 * ISA says that "Reserved fields in instructions are ignored
3000 * by the processor". So bit 6 is ignored on non-POWER9 CPUs, but
3001 * as this is not an instruction software should be using,
3002 * complain to the user.
3003 */
3004 if (!(ctx->insns_flags2 & PPC2_ISA300)) {
3005 qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
3006 TARGET_FMT_lx "\n", ctx->base.pc_next - 4);
3007 } else {
3008 bar = TCG_MO_ST_LD;
3009 }
3010 }
3011
3012 tcg_gen_mb(bar | TCG_BAR_SC);
3013 }
3014
3015 #if !defined(CONFIG_USER_ONLY)
3016 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
3017 {
3018 TCGv_i32 t;
3019 TCGLabel *l;
3020
3021 if (!ctx->lazy_tlb_flush) {
3022 return;
3023 }
3024 l = gen_new_label();
3025 t = tcg_temp_new_i32();
3026 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
3027 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
3028 if (global) {
3029 gen_helper_check_tlb_flush_global(cpu_env);
3030 } else {
3031 gen_helper_check_tlb_flush_local(cpu_env);
3032 }
3033 gen_set_label(l);
3034 tcg_temp_free_i32(t);
3035 }
3036 #else
3037 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
3038 #endif
3039
3040 /* isync */
3041 static void gen_isync(DisasContext *ctx)
3042 {
3043 /*
3044 * We need to check for a pending TLB flush. This can only happen in
3045 * kernel mode, however, so check MSR_PR.
3046 */
3047 if (!ctx->pr) {
3048 gen_check_tlb_flush(ctx, false);
3049 }
3050 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
3051 gen_stop_exception(ctx);
3052 }
3053
3054 #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
3055
3056 static void gen_load_locked(DisasContext *ctx, TCGMemOp memop)
3057 {
3058 TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3059 TCGv t0 = tcg_temp_new();
3060
3061 gen_set_access_type(ctx, ACCESS_RES);
3062 gen_addr_reg_index(ctx, t0);
3063 tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
3064 tcg_gen_mov_tl(cpu_reserve, t0);
3065 tcg_gen_mov_tl(cpu_reserve_val, gpr);
3066 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3067 tcg_temp_free(t0);
3068 }
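/*
 * Sketch of the load-reserved/store-conditional protocol this starts
 * (pseudo-C, names illustrative): larx records the address and loaded
 * value; the matching stcx. later succeeds only if a cmpxchg at the
 * reserved address still sees the recorded value.
 *
 *   larx:   reserve = EA; reserve_val = *EA;
 *   stcx.:  ok = (reserve == EA) && cmpxchg(EA, reserve_val, new_val);
 */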
3069
3070 #define LARX(name, memop) \
3071 static void gen_##name(DisasContext *ctx) \
3072 { \
3073 gen_load_locked(ctx, memop); \
3074 }
3075
3076 /* lbarx, lharx & lwarx */
3077 LARX(lbarx, DEF_MEMOP(MO_UB))
3078 LARX(lharx, DEF_MEMOP(MO_UW))
3079 LARX(lwarx, DEF_MEMOP(MO_UL))
3080
3081 static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop)
3082 {
3083 uint32_t gpr_FC = FC(ctx->opcode);
3084 TCGv EA = tcg_temp_new();
3085 TCGv src, dst;
3086
3087 gen_addr_register(ctx, EA);
3088 dst = cpu_gpr[rD(ctx->opcode)];
3089 src = cpu_gpr[rD(ctx->opcode) + 1];
3090
3091 memop |= MO_ALIGN;
3092 switch (gpr_FC) {
3093 case 0: /* Fetch and add */
3094 tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
3095 break;
3096 case 1: /* Fetch and xor */
3097 tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
3098 break;
3099 case 2: /* Fetch and or */
3100 tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
3101 break;
3102 case 3: /* Fetch and 'and' */
3103 tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
3104 break;
3105 case 4: /* Fetch and max unsigned */
3106 tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
3107 break;
3108 case 5: /* Fetch and max signed */
3109 tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
3110 break;
3111 case 6: /* Fetch and min unsigned */
3112 tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
3113 break;
3114 case 7: /* Fetch and min signed */
3115 tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
3116 break;
3117 case 8: /* Swap */
3118 tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
3119 break;
3120 case 16: /* Compare and swap not equal */
3121 case 24: /* Fetch and increment bounded */
3122 case 25: /* Fetch and increment equal */
3123 case 28: /* Fetch and decrement bounded */
3124 gen_invalid(ctx);
3125 break;
3126 default:
3127 /* invoke data storage error handler */
3128 gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
3129 }
3130 tcg_temp_free(EA);
3131 }
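/*
 * Host-side model of one of the new cases above (FC=4, fetch-and-max
 * unsigned): the old memory value is returned and the maximum stored.
 * In the real tcg_gen_atomic_* op this read-modify-write is one single
 * atomic step.
 */
static inline uint32_t fetch_umax_model(uint32_t *addr, uint32_t val)
{
    uint32_t old = *addr;               /* atomically, in the real op */
    *addr = old > val ? old : val;
    return old;
}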
3132
3133 static void gen_lwat(DisasContext *ctx)
3134 {
3135 gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
3136 }
3137
3138 #ifdef TARGET_PPC64
3139 static void gen_ldat(DisasContext *ctx)
3140 {
3141 gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
3142 }
3143 #endif
3144
3145 static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop)
3146 {
3147 uint32_t gpr_FC = FC(ctx->opcode);
3148 TCGv EA = tcg_temp_new();
3149 TCGv src, discard;
3150
3151 gen_addr_register(ctx, EA);
3152 src = cpu_gpr[rD(ctx->opcode)];
3153 discard = tcg_temp_new();
3154
3155 memop |= MO_ALIGN;
3156 switch (gpr_FC) {
3157 case 0: /* add and Store */
3158 tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3159 break;
3160 case 1: /* xor and Store */
3161 tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3162 break;