hw/arm/virt: Merge VirtBoardInfo and VirtMachineState
[qemu.git] / target / s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
39
/* global register indexes */
/* TCG global holding the CPU env pointer; initialized in s390x_translate_init(). */
static TCGv_env cpu_env;
42
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
46
47 #include "trace-tcg.h"
48 #include "exec/log.h"
49
50
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation state threaded through the decoder. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;        /* decode-table entry for the current insn */
    DisasFields *fields;          /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;         /* guest address of current / following insn */
    enum cc_op cc_op;             /* how the condition code is currently represented */
    bool singlestep_enabled;
};
64
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;    /* true: operands are in u.s64, else in u.s32 */
    bool g1;       /* operand a is a TCG global -- must not be freed */
    bool g2;       /* operand b is a TCG global -- must not be freed */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
76
/* NOTE(review): extra disassembly-status value; presumably extends the
   generic DISAS_* codes used by the translator loop (defined outside
   this chunk) -- confirm against the rest of the file. */
#define DISAS_EXCP 4

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc-method counters of branches compiled inline vs. via helper;
   dumped by s390_cpu_dump_state(). */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
83
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
85 {
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
89 }
90 }
91 return pc;
92 }
93
94 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
95 int flags)
96 {
97 S390CPU *cpu = S390_CPU(cs);
98 CPUS390XState *env = &cpu->env;
99 int i;
100
101 if (env->cc_op > 3) {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
103 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
104 } else {
105 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
106 env->psw.mask, env->psw.addr, env->cc_op);
107 }
108
109 for (i = 0; i < 16; i++) {
110 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
111 if ((i % 4) == 3) {
112 cpu_fprintf(f, "\n");
113 } else {
114 cpu_fprintf(f, " ");
115 }
116 }
117
118 for (i = 0; i < 16; i++) {
119 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
120 if ((i % 4) == 3) {
121 cpu_fprintf(f, "\n");
122 } else {
123 cpu_fprintf(f, " ");
124 }
125 }
126
127 for (i = 0; i < 32; i++) {
128 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
129 env->vregs[i][0].ll, env->vregs[i][1].ll);
130 cpu_fprintf(f, (i % 2) ? "\n" : " ");
131 }
132
133 #ifndef CONFIG_USER_ONLY
134 for (i = 0; i < 16; i++) {
135 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
136 if ((i % 4) == 3) {
137 cpu_fprintf(f, "\n");
138 } else {
139 cpu_fprintf(f, " ");
140 }
141 }
142 #endif
143
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i = 0; i < CC_OP_MAX; i++) {
146 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
147 inline_branch_miss[i], inline_branch_hit[i]);
148 }
149 #endif
150
151 cpu_fprintf(f, "\n");
152 }
153
/* TCG globals backing the architectural PSW. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;        /* PER breaking-event address */

/* TCG globals backing the lazily-evaluated condition code. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Register names ("r0".."r15", "f0".."f15") filled by s390x_translate_init(). */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
166
/* Register all TCG globals (env pointer, PSW fields, cc state, r0-r15
   and f0-f15) that mirror CPUS390XState fields during translation. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        /* The FPRs are mapped onto the first doubleword of each vreg. */
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
206
207 static TCGv_i64 load_reg(int reg)
208 {
209 TCGv_i64 r = tcg_temp_new_i64();
210 tcg_gen_mov_i64(r, regs[reg]);
211 return r;
212 }
213
214 static TCGv_i64 load_freg32_i64(int reg)
215 {
216 TCGv_i64 r = tcg_temp_new_i64();
217 tcg_gen_shri_i64(r, fregs[reg], 32);
218 return r;
219 }
220
/* Store V into the full 64 bits of GPR REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into the full 64 bits of FPR REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the low 32 bits of V into GPR REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the high half of GPR REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a 32-bit float value (low 32 bits of V) into the high half of FPR REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch into DEST the low 64 bits of a 128-bit helper result (env->retxl). */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Sync the translation-time PC into the psw.addr global. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
257
/* PER support: record a branch taken by the current instruction.
   TO_NEXT means the target is the next sequential instruction, whose
   address is known at translation time; otherwise the target is whatever
   psw_addr currently holds.  No-op in user-only builds. */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only the constant we allocated is freed; psw_addr is a global. */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
272
/* PER support: record a conditional branch, taken when COND holds for
   ARG1/ARG2.  With PER active the helper is called on the taken path;
   otherwise gbea is updated via movcond so no branch is emitted.
   No-op in user-only builds. */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper when the branch is NOT taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* gbea = cond ? s->pc : gbea, without a branch. */
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
292
/* Record the current instruction as a PER breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

/* Flush the translation-time cc method into the cc_op global, unless it is
   already there (DYNAMIC) or cc_op already holds the value itself (STATIC). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Sync PC and cc state before an operation that may fault. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
310
311 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
312 {
313 return (uint64_t)cpu_lduw_code(env, pc);
314 }
315
316 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
317 {
318 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
319 }
320
321 static int get_mem_index(DisasContext *s)
322 {
323 switch (s->tb->flags & FLAG_MASK_ASC) {
324 case PSW_ASC_PRIMARY >> 32:
325 return 0;
326 case PSW_ASC_SECONDARY >> 32:
327 return 1;
328 case PSW_ASC_HOME >> 32:
329 return 2;
330 default:
331 tcg_abort();
332 break;
333 }
334 }
335
/* Raise exception EXCP via the exception helper. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
342
/* Raise a program exception with interruption code CODE, recording the
   instruction length and advancing the PSW past the instruction. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length in bytes, for delivery of the exception. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
366
/* Raise an operation exception (illegal/unavailable opcode). */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception after setting the DXC field of the FPC to 0xff. */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
385
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when the PSW problem-state
   bit (as cached in tb->flags) is set. */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
394
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a new temporary.  Register number 0 means "no register" for both
   B2 and X2.  Outside 64-bit mode the result is truncated to 31 bits. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends. */

    /* Note that addi optimizes the imm==0 case. */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            /* Pure-constant address: mask at translation time instead. */
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
424
425 static inline bool live_cc_data(DisasContext *s)
426 {
427 return (s->cc_op != CC_OP_DYNAMIC
428 && s->cc_op != CC_OP_STATIC
429 && s->cc_op > 3);
430 }
431
/* Set the condition code to the constant VAL (0..3), telling TCG that
   any previously live cc inputs are dead. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
441
/* Defer cc computation with method OP and one input (cc_dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        /* The unused inputs of the new method are dead. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation with method OP and two inputs (cc_src, cc_dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation with method OP and all three inputs. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
471
/* cc = (VAL != 0), deferred via CC_OP_NZ. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result, deferred via CC_OP_NZ_F32. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result, deferred via CC_OP_NZ_F64. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result (high/low halves), via CC_OP_NZ_F128. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
491
/* CC value is in env->cc_op */
/* Mark the cc as already computed into the cc_op global; any pending
   cc inputs are dead. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
502
/* calculates cc into cc_op */
/* Materialize the deferred condition code into the cc_op global by
   calling the calc_cc helper with the appropriate number of arguments
   for the current cc method, then mark the cc as static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First switch: decide which temporaries the helper call needs.
       Methods with fewer than 3 arguments need a dummy placeholder;
       constants/static/dynamic need no explicit cc_op constant. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second switch: emit the actual computation. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
610
/* Decide whether a direct tb-to-tb link (goto_tb) may be used to reach
   DEST.  Forbidden under single-stepping, when the last insn of the TB
   performs I/O, or when PER is active. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->singlestep_enabled) ||
        (s->tb->cflags & CF_LAST_IO) ||
        (s->tb->flags & FLAG_MASK_PER)) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* Softmmu: only link within the page of the TB start or of the
       current instruction. */
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
625
/* Debug statistics: count a branch that had to go through the helper. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug statistics: count a branch compiled to an inline comparison. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
639
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because the CC=3 bit (bit 0) is ignored. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT, TCG_COND_GT,           /*    |    | GT | x */
    TCG_COND_LT, TCG_COND_LT,           /*    | LT |    | x */
    TCG_COND_NE, TCG_COND_NE,           /*    | LT | GT | x */
    TCG_COND_EQ, TCG_COND_EQ,           /* EQ |    |    | x */
    TCG_COND_GE, TCG_COND_GE,           /* EQ |    | GT | x */
    TCG_COND_LE, TCG_COND_LE,           /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,   /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,           /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,           /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,   /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
665
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the cc method
   allows it, the comparison is built directly from the cc inputs;
   otherwise the cc is materialized (gen_op_calc_cc) and the mask is
   tested against the computed value. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 = always, mask 0 = never: no operands are needed, but we
       still point both at the cc_op global so free_compare is a no-op. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (src & dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in the cc_op global: translate each
           possible mask into a comparison against it. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
956
957 static void free_compare(DisasCompare *c)
958 {
959 if (!c->g1) {
960 if (c->is_64) {
961 tcg_temp_free_i64(c->u.s64.a);
962 } else {
963 tcg_temp_free_i32(c->u.s32.a);
964 }
965 }
966 if (!c->g2) {
967 if (c->is_64) {
968 tcg_temp_free_i64(c->u.s64.b);
969 } else {
970 tcg_temp_free_i32(c->u.s32.b);
971 }
972 }
973 }
974
/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

/* Each F<n>() line in insn-format.def expands to one FMT_<name> value. */
typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
994
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps. */

/* Original (per-format) field indices, used in availability bitmaps. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage slots; fields that never coexist share a slot. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* Decoded instruction: raw bytes, opcode bytes and operand fields. */
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;            /* primary opcode byte */
    unsigned op2:8;           /* secondary opcode byte */
    unsigned presentC:16;     /* bitmap over DisasFieldIndexC slots */
    unsigned int presentO;    /* bitmap over DisasFieldIndexO indices */
    int c[NUM_C_FIELD];       /* field values, indexed by DisasFieldIndexC */
};
1063
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test whether original-index field C was decoded for this insn. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch field O from its compact slot C; asserts the field is present. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1079
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;      /* first bit of the field within the insn */
    unsigned int size:8;     /* width in bits */
    unsigned int type:2;     /* extraction kind: 0 for R/M/B/short-D and L,
                                1 for immediates, 2 for 20-bit displacements
                                (see the macros below) */
    unsigned int indexC:6;   /* compact storage slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

/* Field layouts for one format: at most one entry per compact slot. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* Per-format field layout table, indexed by DisasFormat. */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1133
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* corresponding TCGv is a global */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

#define SPEC_r1_even 1
#define SPEC_r2_even 2
#define SPEC_r3_even 4
#define SPEC_r1_f128 8
#define SPEC_r2_f128 16
1154
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1172
/* Architecture facility an instruction belongs to. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
1198
/* Static decode-table entry describing one instruction. */
struct DisasInsn {
    unsigned opc:16;        /* opcode */
    DisasFormat fmt:8;      /* instruction format (field layout) */
    DisasFacility fac:8;    /* facility the insn belongs to */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;

    /* Translation pipeline hooks: load inputs, prepare output, emit the
       operation, write outputs back, compute cc. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant available to the helpers (usage not visible
       in this chunk). */
    uint64_t data;
};
1216
1217 /* ====================================================================== */
1218 /* Miscellaneous helpers, used by several operations. */
1219
1220 static void help_l2_shift(DisasContext *s, DisasFields *f,
1221 DisasOps *o, int mask)
1222 {
1223 int b2 = get_field(f, b2);
1224 int d2 = get_field(f, d2);
1225
1226 if (b2 == 0) {
1227 o->in2 = tcg_const_i64(d2 & mask);
1228 } else {
1229 o->in2 = get_address(s, 0, b2, d2);
1230 tcg_gen_andi_i64(o->in2, o->in2, mask);
1231 }
1232 }
1233
/* Emit an unconditional branch to DEST, choosing the cheapest exit:
   fall through if DEST is the next insn, chain via goto_tb when allowed,
   otherwise update the PSW address and exit to the main loop.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branch-to-next is a no-op except for PER branch tracing.  */
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
1253
/* Emit a conditional branch.  C holds the (already evaluated) condition;
   the target is either PC-relative immediate IMM (is_imm) or the address
   in CDEST.  Handles always/never conditions specially, then picks one of
   three code shapes depending on which exits may use goto_tb.
   Consumes C (free_compare) on all paths.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;    /* IMM counts halfwords */
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* 32-bit compare: widen the setcond result so a single
               64-bit movcond can select the new PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1384
1385 /* ====================================================================== */
1386 /* The operations. These perform the bulk of the work for any insn,
1387 usually after the operands have been loaded and output initialized. */
1388
1389 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1390 {
1391 TCGv_i64 z, n;
1392 z = tcg_const_i64(0);
1393 n = tcg_temp_new_i64();
1394 tcg_gen_neg_i64(n, o->in2);
1395 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1396 tcg_temp_free_i64(n);
1397 tcg_temp_free_i64(z);
1398 return NO_EXIT;
1399 }
1400
/* Float32 absolute value: clear the sign bit (bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1406
/* Float64 absolute value: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1412
/* Float128 absolute value: clear the sign bit in the high half,
   copy the low half unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1419
/* Integer addition; CC is computed separately by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1425
1426 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1427 {
1428 DisasCompare cmp;
1429 TCGv_i64 carry;
1430
1431 tcg_gen_add_i64(o->out, o->in1, o->in2);
1432
1433 /* The carry flag is the msb of CC, therefore the branch mask that would
1434 create that comparison is 3. Feeding the generated comparison to
1435 setcond produces the carry flag that we desire. */
1436 disas_jcc(s, &cmp, 3);
1437 carry = tcg_temp_new_i64();
1438 if (cmp.is_64) {
1439 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1440 } else {
1441 TCGv_i32 t = tcg_temp_new_i32();
1442 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1443 tcg_gen_extu_i32_i64(carry, t);
1444 tcg_temp_free_i32(t);
1445 }
1446 free_compare(&cmp);
1447
1448 tcg_gen_add_i64(o->out, o->out, carry);
1449 tcg_temp_free_i64(carry);
1450 return NO_EXIT;
1451 }
1452
/* ADD (short BFP): delegate to the softfloat helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1458
/* ADD (long BFP): delegate to the softfloat helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1464
/* ADD (extended BFP): 128-bit result comes back split across the
   helper return value and the env retxl slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1471
/* Bitwise AND; CC is computed separately by the cout hook.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1477
/* AND IMMEDIATE (NIHH et al): AND a 16/32-bit immediate into one field
   of the register, leaving the other bits untouched.  insn->data packs
   the field's bit position (low byte) and width (high byte).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate and set all bits outside the field so the
       AND below preserves them.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1494
/* BRANCH AND SAVE: store the link info, then branch to the address in
   in2.  in2 unused means the base/index was 0: save only, no branch.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
1506
/* BRANCH RELATIVE AND SAVE: link, then branch PC-relative by the
   halfword-scaled immediate.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1512
/* BRANCH ON CONDITION: build the comparison from mask m1 and hand the
   branch (immediate or register target) to help_branch.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1523
1524 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1525 {
1526 int r1 = get_field(s->fields, r1);
1527 bool is_imm = have_field(s->fields, i2);
1528 int imm = is_imm ? get_field(s->fields, i2) : 0;
1529 DisasCompare c;
1530 TCGv_i64 t;
1531
1532 c.cond = TCG_COND_NE;
1533 c.is_64 = false;
1534 c.g1 = false;
1535 c.g2 = false;
1536
1537 t = tcg_temp_new_i64();
1538 tcg_gen_subi_i64(t, regs[r1], 1);
1539 store_reg32_i64(r1, t);
1540 c.u.s32.a = tcg_temp_new_i32();
1541 c.u.s32.b = tcg_const_i32(0);
1542 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1543 tcg_temp_free_i64(t);
1544
1545 return help_branch(s, &c, is_imm, imm, o->in2);
1546 }
1547
/* BRANCH RELATIVE ON COUNT HIGH: decrement the high 32 bits of r1 and
   branch (always PC-relative) if the result is nonzero.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Extract the high word, decrement, store it back high.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1571
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if the
   result is nonzero.  The register is compared directly (g1 = global).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1590
/* BRANCH ON INDEX (32-bit, BXH/BXLE): r1 += r3, then compare against the
   comparand in r3|1 (r3 if r3 is odd).  insn->data selects LE vs GT.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    /* Write the incremented index back after reading the comparand, so
       the case r1 == (r3|1) uses the pre-update comparand value.  */
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1616
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): r1 += r3, compare with r3|1.
   insn->data selects LE vs GT.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If the index register doubles as the comparand, snapshot the
       comparand before the addition below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1642
/* COMPARE AND BRANCH (CRJ/CGRJ/CIJ/...): compare in1 with in2 per the
   m3 less-than/greater-than mask and branch.  insn->data nonzero means
   an unsigned (logical) comparison.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Register-target form: compute the branch address into o->out.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1668
/* COMPARE (short BFP): helper sets CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1675
/* COMPARE (long BFP): helper sets CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1682
/* COMPARE (extended BFP): helper sets CC directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1689
/* CONVERT TO FIXED (short BFP -> 32-bit), rounding mode in m3.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1698
/* CONVERT TO FIXED (long BFP -> 32-bit), rounding mode in m3.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1707
/* CONVERT TO FIXED (extended BFP -> 32-bit), rounding mode in m3.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1716
/* CONVERT TO FIXED (short BFP -> 64-bit), rounding mode in m3.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1725
/* CONVERT TO FIXED (long BFP -> 64-bit), rounding mode in m3.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1734
/* CONVERT TO FIXED (extended BFP -> 64-bit), rounding mode in m3.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1743
/* CONVERT TO LOGICAL (short BFP -> u32), rounding mode in m3.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1752
/* CONVERT TO LOGICAL (long BFP -> u32), rounding mode in m3.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1761
/* CONVERT TO LOGICAL (extended BFP -> u32), rounding mode in m3.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1770
/* CONVERT TO LOGICAL (short BFP -> u64), rounding mode in m3.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1779
/* CONVERT TO LOGICAL (long BFP -> u64), rounding mode in m3.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1788
/* CONVERT TO LOGICAL (extended BFP -> u64), rounding mode in m3.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1797
/* CONVERT FROM FIXED (64-bit -> short BFP), rounding mode in m3.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1805
/* CONVERT FROM FIXED (64-bit -> long BFP), rounding mode in m3.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1813
/* CONVERT FROM FIXED (64-bit -> extended BFP), rounding mode in m3.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1822
/* CONVERT FROM LOGICAL (u64 -> short BFP), rounding mode in m3.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1830
/* CONVERT FROM LOGICAL (u64 -> long BFP), rounding mode in m3.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1838
/* CONVERT FROM LOGICAL (u64 -> extended BFP), rounding mode in m3.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1847
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed; we then advance the address/length register pair.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* NB: return_low128 is a statement (fetches env->retxl into o->out),
       not a C return -- the register updates below still execute.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1864
/* COMPARE LOGICAL (CLC): for power-of-two operand lengths emit inline
   loads and a CC comparison; otherwise fall back to the byte-loop helper.
   The l1 field encodes length-1.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: the helper performs the comparison and
           sets the CC itself.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1898
1899 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1900 {
1901 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1902 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1903 potential_page_fault(s);
1904 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1905 tcg_temp_free_i32(r1);
1906 tcg_temp_free_i32(r3);
1907 set_cc_static(s);
1908 return NO_EXIT;
1909 }
1910
1911 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1912 {
1913 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1914 TCGv_i32 t1 = tcg_temp_new_i32();
1915 tcg_gen_extrl_i64_i32(t1, o->in1);
1916 potential_page_fault(s);
1917 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1918 set_cc_static(s);
1919 tcg_temp_free_i32(t1);
1920 tcg_temp_free_i32(m3);
1921 return NO_EXIT;
1922 }
1923
/* COMPARE LOGICAL STRING: helper scans up to the terminator in r0,
   returning updated addresses in retv/retxl and setting the CC.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1932
1933 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1934 {
1935 TCGv_i64 t = tcg_temp_new_i64();
1936 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1937 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1938 tcg_gen_or_i64(o->out, o->out, t);
1939 tcg_temp_free_i64(t);
1940 return NO_EXIT;
1941 }
1942
/* COMPARE AND SWAP (CS/CSG): non-atomic load/compare/store sequence;
   the store is issued unconditionally (movcond re-stores the old value
   on mismatch), which the architecture permits.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1992
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap built from
   two 64-bit accesses; the compare is folded with xor/or so a single
   setcond yields the CC.  Stores are unconditional (see op_cs).  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Select new value (equal) or old value (unequal) for each half.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
2044
2045 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper does the CAS plus
   TLB purge and sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
2055 #endif
2056
2057 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2058 {
2059 TCGv_i64 t1 = tcg_temp_new_i64();
2060 TCGv_i32 t2 = tcg_temp_new_i32();
2061 tcg_gen_extrl_i64_i32(t2, o->in1);
2062 gen_helper_cvd(t1, t2);
2063 tcg_temp_free_i32(t2);
2064 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2065 tcg_temp_free_i64(t1);
2066 return NO_EXIT;
2067 }
2068
2069 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2070 {
2071 int m3 = get_field(s->fields, m3);
2072 TCGLabel *lab = gen_new_label();
2073 TCGCond c;
2074
2075 c = tcg_invert_cond(ltgt_cond[m3]);
2076 if (s->insn->data) {
2077 c = tcg_unsigned_cond(c);
2078 }
2079 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2080
2081 /* Trap. */
2082 gen_trap(s);
2083
2084 gen_set_label(lab);
2085 return NO_EXIT;
2086 }
2087
2088 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall interface; PSW address and CC must be
   up to date before the helper runs, since it may consult them.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2106 #endif
2107
/* Signed 32-bit divide: helper returns quotient/remainder pair.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2114
/* Unsigned 32-bit divide: helper returns quotient/remainder pair.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2121
/* Signed 64-bit divide: helper returns quotient/remainder pair.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2128
/* Unsigned 128/64 divide: 128-bit dividend in out:out2.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2135
/* DIVIDE (short BFP): delegate to the softfloat helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2141
/* DIVIDE (long BFP): delegate to the softfloat helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2147
/* DIVIDE (extended BFP): 128-bit result via helper return + retxl.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2154
/* EXTRACT ACCESS REGISTER: read access register r2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
2161
/* EXTRACT CACHE ATTRIBUTE: we model no cache topology, so report -1.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
2168
/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2174
2175 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2176 {
2177 int r1 = get_field(s->fields, r1);
2178 int r2 = get_field(s->fields, r2);
2179 TCGv_i64 t = tcg_temp_new_i64();
2180
2181 /* Note the "subsequently" in the PoO, which implies a defined result
2182 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2183 tcg_gen_shri_i64(t, psw_mask, 32);
2184 store_reg32_i64(r1, t);
2185 if (r2 != 0) {
2186 store_reg32_i64(r2, psw_mask);
2187 }
2188
2189 tcg_temp_free_i64(t);
2190 return NO_EXIT;
2191 }
2192
/* EXECUTE: run the target instruction (with its second byte OR'd from
   the register) via a helper interpreter.  State must be synced first
   since the helper may raise exceptions.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
2217
/* LOAD FP INTEGER (short BFP): round to integer, mode in m3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2225
/* LOAD FP INTEGER (long BFP): round to integer, mode in m3.  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2233
/* LOAD FP INTEGER (extended BFP): round to integer, mode in m3;
   128-bit result via helper return + retxl.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2242
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if input is zero),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2262
2263 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2264 {
2265 int m3 = get_field(s->fields, m3);
2266 int pos, len, base = s->insn->data;
2267 TCGv_i64 tmp = tcg_temp_new_i64();
2268 uint64_t ccm;
2269
2270 switch (m3) {
2271 case 0xf:
2272 /* Effectively a 32-bit load. */
2273 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2274 len = 32;
2275 goto one_insert;
2276
2277 case 0xc:
2278 case 0x6:
2279 case 0x3:
2280 /* Effectively a 16-bit load. */
2281 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2282 len = 16;
2283 goto one_insert;
2284
2285 case 0x8:
2286 case 0x4:
2287 case 0x2:
2288 case 0x1:
2289 /* Effectively an 8-bit load. */
2290 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2291 len = 8;
2292 goto one_insert;
2293
2294 one_insert:
2295 pos = base + ctz32(m3) * 8;
2296 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2297 ccm = ((1ull << len) - 1) << pos;
2298 break;
2299
2300 default:
2301 /* This is going to be a sequence of loads and inserts. */
2302 pos = base + 32 - 8;
2303 ccm = 0;
2304 while (m3) {
2305 if (m3 & 0x8) {
2306 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2307 tcg_gen_addi_i64(o->in2, o->in2, 1);
2308 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2309 ccm |= 0xff << pos;
2310 }
2311 m3 = (m3 << 1) & 0xf;
2312 pos -= 8;
2313 }
2314 break;
2315 }
2316
2317 tcg_gen_movi_i64(tmp, ccm);
2318 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2319 tcg_temp_free_i64(tmp);
2320 return NO_EXIT;
2321 }
2322
/* Insert-immediate family: deposit in2 into in1 at the bit position and
   width packed into insn->data (low byte = shift, remaining bits =
   field size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2330
2331 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2332 {
2333 TCGv_i64 t1;
2334
2335 gen_op_calc_cc(s);
2336 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2337
2338 t1 = tcg_temp_new_i64();
2339 tcg_gen_shli_i64(t1, psw_mask, 20);
2340 tcg_gen_shri_i64(t1, t1, 36);
2341 tcg_gen_or_i64(o->out, o->out, t1);
2342
2343 tcg_gen_extu_i32_i64(t1, cc_op);
2344 tcg_gen_shli_i64(t1, t1, 28);
2345 tcg_gen_or_i64(o->out, o->out, t1);
2346 tcg_temp_free_i64(t1);
2347 return NO_EXIT;
2348 }
2349
2350 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY -- privileged; the invalidation and TLB
   flushing are handled entirely by the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED -- privileged; the helper produces the
   storage key for the address in in2.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2364 #endif
2365
/* BFP format conversions.  Rounding and exception semantics live in
   the respective helpers; the e/d/x suffixes follow the instruction
   mnemonics (short/long/extended formats).  Extended-format operands
   arrive as the in1:in2 pair, and extended results come back as a
   128-bit value whose low half is recovered via return_low128.  */

static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2403
/* Load-logical-thirty-one: keep only bits 0-30 of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Simple memory loads of various widths and extensions; the address is
   in in2 and the operand wiring is chosen by the insn table.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2451
/* Load-and-trap family: commit the loaded/derived value to its
   destination, then trap iff that value is zero.  As the comments note,
   the value is stored even on the trapping path.  For op_lat/op_lfhat
   the load itself was already done into in2 by the operand wiring; the
   others perform the load or masking here.  */

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2506
/* LOAD ON CONDITION: out = (condition from m3 holds) ? in2 : in1,
   emitted branch-free via movcond.  disas_jcc supplies either a 64-bit
   or a 32-bit comparison; the 32-bit case is first reduced to a 0/1
   value and widened so a single 64-bit movcond can be used.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2536
2537 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit form) -- privileged.  The helper loads control
   registers r1..r3 from memory at in2 and may fault, hence
   potential_page_fault.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit form) -- privileged; as op_lctl but with 64-bit
   control-register values.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS -- privileged.  Address translation and the
   resulting condition code are produced by the helper; set_cc_static
   records that cc_op now holds the final CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2569
/* LOAD PSW (short form) -- privileged.  Loads a 32-bit mask plus
   32-bit address pair from memory, widens the mask into the 64-bit
   PSW layout, and installs the new PSW.  The TB must end
   (EXIT_NORETURN) since control flow continues at the new PSW; the
   PER breaking event is signalled first.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED -- privileged.  As op_lpsw but the full 128-bit
   PSW (two 64-bit doublewords) is loaded directly.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2607 #endif
2608
/* LOAD ACCESS MULTIPLE: the helper loads access registers r1..r3 from
   memory at in2; it may fault, hence potential_page_fault.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2619
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (register numbers wrap
   mod 16) from consecutive words at the address in in2.  The first and
   last words are loaded before any register is written so that a page
   fault cannot leave the register set partially updated.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2665
/* LOAD MULTIPLE HIGH: identical structure to op_lm32, but the loaded
   words are written to the high halves of the registers
   (store_reg32h_i64).  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2711
/* LOAD MULTIPLE (64-bit): as op_lm32 with doubleword loads straight
   into the full registers.  The first value is staged in a temp so a
   fault on the last load cannot clobber regs[r1] first.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
2752
2753 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit) -- privileged; the real-address
   access is performed by the helper.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit) -- privileged.  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2769 #endif
2770
/* Generic move: transfer ownership of the in2 temp (and its "global"
   flag) to out, and mark in2 unused so the common cleanup code does
   not free the same temp twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2779
/* As op_mov2, but additionally set access register 1 according to the
   current address-space control from the TB flags (used by the insn
   variants wired to this op in the insn table).  In access-register
   mode the source AR (aregs[b2]) is copied; otherwise a fixed ALET-like
   value is written.  All four ASC encodings are covered, so ar1 is
   always assigned -- assuming FLAG_MASK_ASC can only yield these four
   values (NOTE(review): confirm; there is no default case).  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
2814
/* Move a register pair: transfer ownership of both input temps to the
   output pair, marking the inputs unused (see op_mov2).  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2826
/* MOVE (character): the helper copies l1+1 bytes from in2 to addr1 and
   may fault.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2835
/* MOVE LONG: addresses and lengths live in the register pairs r1/r2;
   the helper updates them and yields the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl with the r1/r3 pairs plus the extra
   operand in in2; CC comes from the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2859
2860 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY -- privileged.  Note the register number is encoded
   in the l1 field for this format; regs[r1] carries the key/length
   operand to the helper.  CC from the helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY -- privileged; mirror image of op_mvcp.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2880 #endif
2881
/* MOVE PAGE: regs[0] carries the implicit R0 operand; CC from the
   helper.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: regs[0] holds the terminator byte (implicit R0).  The
   helper writes the updated destination address into in1 directly and
   returns the updated source address via the 128-bit low half, which
   return_low128 routes into in2.  CC from the helper.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2898
/* Multiplication ops.  Integer: plain 64-bit product (op_mul) or full
   128-bit unsigned product into the out:out2 pair (op_mul128).  BFP
   multiply and fused multiply-add/subtract go through helpers; for the
   long-format madb/msdb the third operand is the full FP register
   fregs[r3], while the short-format maeb/mseb first widen the 32-bit
   value with load_freg32_i64 (a temp that must be freed here).  */

static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2972
/* Load negative (-|x|).  The integer form selects branch-free between
   the input and its negation: if in2 >= 0 the negated value is taken,
   otherwise the (already negative) input.  The FP forms simply set the
   sign bit of the respective format.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit form: sign bit lives in the high half (in1); the low half is
   copied through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3003
/* AND (character): the helper ANDs l1+1 bytes of storage at addr1 with
   the bytes at in2 and yields the condition code.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3013
/* Negation.  Integer: two's complement.  FP forms: flip the sign bit
   of the respective format.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit form: sign bit lives in the high half (in1); the low half is
   copied through unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3038
/* OR (character): the helper ORs l1+1 bytes of storage at addr1 with
   the bytes at in2 and yields the condition code.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3048
/* Bitwise OR of two register/memory operands.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR-immediate family: insn->data packs the shift (low byte) and field
   size of the immediate's position within the register.  CC is derived
   from only the affected bits.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is clobbered by the shift below, so it must not be one of the
       global register temps.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3070
/* POPULATION COUNT via helper; the exact counting semantics (per-byte
   counts on s390x -- see the helper implementation) live there.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3076
3077 #ifndef CONFIG_USER_ONLY
/* PURGE TLB -- privileged; fully handled in the helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3084 #endif
3085
/* ROTATE THEN INSERT SELECTED BITS (risbg / risbhg / risblg,
   distinguished by op2).  All of the mask/position arithmetic happens
   at translate time; at runtime only a rotate plus a deposit or a few
   and/or ops are emitted.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts.  */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3168
3169 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3170 {
3171 int i3 = get_field(s->fields, i3);
3172 int i4 = get_field(s->fields, i4);
3173 int i5 = get_field(s->fields, i5);
3174 uint64_t mask;
3175
3176 /* If this is a test-only form, arrange to discard the result. */
3177 if (i3 & 0x80) {
3178 o->out = tcg_temp_new_i64();
3179 o->g_out = false;
3180 }
3181
3182 i3 &= 63;
3183 i4 &= 63;
3184 i5 &= 63;
3185
3186 /* MASK is the set of bits to be operated on from R2.
3187 Take care for I3/I4 wraparound. */
3188 mask = ~0ull >> i3;
3189 if (i3 <= i4) {
3190 mask ^= ~0ull >> i4 >> 1;
3191 } else {
3192 mask |= ~(~0ull >> i4 >> 1);
3193 }
3194
3195 /* Rotate the input as necessary. */
3196 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3197
3198 /* Operate. */
3199 switch (s->fields->op2) {
3200 case 0x55: /* AND */
3201 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3202 tcg_gen_and_i64(o->out, o->out, o->in2);
3203 break;
3204 case 0x56: /* OR */
3205 tcg_gen_andi_i64(o->in2, o->in2, mask);
3206 tcg_gen_or_i64(o->out, o->out, o->in2);
3207 break;
3208 case 0x57: /* XOR */
3209 tcg_gen_andi_i64(o->in2, o->in2, mask);
3210 tcg_gen_xor_i64(o->out, o->out, o->in2);
3211 break;