/* target-s390x/translate.c */
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Compile-time debug switches for the translator.  */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* Per-instruction disassembly tracing; compiled out unless VERBOSE is set. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
39
/* global register indexes */
static TCGv_env cpu_env;  /* TCG handle for the CPU env pointer (TCG_AREG0) */
42
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
46
47 #include "trace-tcg.h"
48 #include "exec/log.h"
49
50
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being translated */
    const DisasInsn *insn;        /* decode-table entry for current insn */
    DisasFields *fields;          /* decoded operand fields of current insn */
    uint64_t pc, next_pc;         /* guest PC of current / following insn */
    enum cc_op cc_op;             /* compile-time knowledge of the cc state */
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;   /* true: compare u.s64, false: compare u.s32 */
    bool g1;      /* operand a is a global TCG value; do not free it */
    bool g2;      /* operand b is a global TCG value; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

/* Counters for how often a branch condition could (not) be inlined. */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
83
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
85 {
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
89 }
90 }
91 return pc;
92 }
93
94 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
95 int flags)
96 {
97 S390CPU *cpu = S390_CPU(cs);
98 CPUS390XState *env = &cpu->env;
99 int i;
100
101 if (env->cc_op > 3) {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
103 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
104 } else {
105 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
106 env->psw.mask, env->psw.addr, env->cc_op);
107 }
108
109 for (i = 0; i < 16; i++) {
110 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
111 if ((i % 4) == 3) {
112 cpu_fprintf(f, "\n");
113 } else {
114 cpu_fprintf(f, " ");
115 }
116 }
117
118 for (i = 0; i < 16; i++) {
119 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
120 if ((i % 4) == 3) {
121 cpu_fprintf(f, "\n");
122 } else {
123 cpu_fprintf(f, " ");
124 }
125 }
126
127 for (i = 0; i < 32; i++) {
128 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
129 env->vregs[i][0].ll, env->vregs[i][1].ll);
130 cpu_fprintf(f, (i % 2) ? "\n" : " ");
131 }
132
133 #ifndef CONFIG_USER_ONLY
134 for (i = 0; i < 16; i++) {
135 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
136 if ((i % 4) == 3) {
137 cpu_fprintf(f, "\n");
138 } else {
139 cpu_fprintf(f, " ");
140 }
141 }
142 #endif
143
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i = 0; i < CC_OP_MAX; i++) {
146 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
147 inline_branch_miss[i], inline_branch_hit[i]);
148 }
149 #endif
150
151 cpu_fprintf(f, "\n");
152 }
153
/* TCG globals mapping the guest PSW and breaking-event address.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;      /* guest breaking-event address (PER) */

/* TCG globals backing the lazily-computed condition code.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Register-name storage ("r0".."r15", "f0".."f15") and the TCG globals
   for the general and floating-point register files.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
166
/* One-time translator initialization: create the TCG globals that mirror
   the fields of CPUS390XState used by generated code.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* The FP registers occupy the first doubleword of the vector
       registers, so map fregs[i] onto vregs[i][0].  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
205
/* Return a fresh temp holding a copy of general register REG.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp holding the 32-bit value of float register REG
   (the high half of the 64-bit FPR), zero-extended to 64 bits.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}
219
/* Store V into general register REG (all 64 bits).  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into float register REG (all 64 bits).  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the low 32 bits of V into general register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of general register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V into the high (short-float) half of FPR REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Commit the translator's current PC into the PSW address global.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
256
/* Record an unconditional branch for PER (Program Event Recording):
   update the breaking-event address and, if PER is active, call the
   helper that may raise a PER event.  TO_NEXT selects a branch to the
   sequential next instruction rather than to psw_addr.  */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

/* Record a conditional branch (taken iff COND holds on ARG1/ARG2) for PER.
   Without PER enabled, only the breaking-event address is conditionally
   updated; with PER, the branch helper runs on the taken path.  */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the PER helper when the branch is NOT taken.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

/* Record the current PC as the breaking-event address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

/* Flush the compile-time cc state into the cc_op global, unless it is
   already dynamic/static (i.e. already materialized in env).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Before an operation that may fault: make psw.addr and cc_op consistent
   so the exception handler sees correct state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
309
/* Fetch a 2-byte instruction halfword at guest PC, zero-extended.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at guest PC, zero-extended
   (the intermediate uint32_t cast avoids sign extension).  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
319
320 static int get_mem_index(DisasContext *s)
321 {
322 switch (s->tb->flags & FLAG_MASK_ASC) {
323 case PSW_ASC_PRIMARY >> 32:
324 return 0;
325 case PSW_ASC_SECONDARY >> 32:
326 return 1;
327 case PSW_ASC_HOME >> 32:
328 return 2;
329 default:
330 tcg_abort();
331 break;
332 }
333 }
334
/* Emit a call to the generic exception helper with code EXCP.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise a program exception with the given interruption CODE, storing the
   code and instruction length in env and advancing the PSW past the
   current instruction as the architecture requires.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, derived from the PC advance.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
365
/* Raise an operation (illegal opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data program exception with DXC 0xff (trap from a
   floating-point/vector instruction).  */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  The DXC lives in byte 1 of the FPC register.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
384
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception if the CPU is in problem state.  */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
393
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a fresh temp, truncating to 31 bits when not in 64-bit mode.
   A register number of 0 means "no register" per the architecture.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* Constant-only address: the 31-bit mask can be folded in now.  */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
423
424 static inline bool live_cc_data(DisasContext *s)
425 {
426 return (s->cc_op != CC_OP_DYNAMIC
427 && s->cc_op != CC_OP_STATIC
428 && s->cc_op > 3);
429 }
430
/* Set the condition code to the constant VAL (0..3), discarding any live
   cc data so the TCG optimizer can drop dead computations.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
440
/* Record a lazily-computed cc with one operand (cc_dst).  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a lazily-computed cc with two operands (cc_src, cc_dst).  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a lazily-computed cc with three operands (cc_src, cc_dst, cc_vr).  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
470
/* Convenience wrappers recording the common "set cc from result" cases.  */

/* cc from zero/nonzero of a 64-bit value.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result, passed as high/low halves.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
490
/* CC value is in env->cc_op (e.g. just written by a helper); mark it
   static and drop any live cc data.  */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
501
/* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op global by
   calling the calc_cc helper with however many of cc_src/cc_dst/cc_vr the
   current cc_op requires, then mark the cc state as static.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First switch: decide which temporaries are needed.  The 3-operand
       ops need no dummy; everything else (default) gets a zero dummy for
       the unused helper arguments.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed at all.  */
        break;
    }

    /* Second switch: emit the actual computation.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
609
/* Return nonzero if a direct TB link (goto_tb) to DEST is permissible:
   not single-stepping, not after an I/O insn, no PER tracing, and (with
   softmmu) DEST on the same guest page as the TB start or current PC.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->singlestep_enabled) ||
        (s->tb->cflags & CF_LAST_IO) ||
        (s->tb->flags & FLAG_MASK_PER)) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
624
/* Debug statistics: count a branch whose cc could NOT be inlined.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug statistics: count a branch whose cc WAS inlined.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
638
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because bit 0 (the CC=3 bit) is ignored.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
664
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Always/never taken needs no operands; use the cc_op global compared
       against itself as harmless placeholders.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: cc 0 = all bits zero, cc 3 = all bits set.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* INSERT CHARACTERS UNDER MASK: cc 0 = inserted value zero.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already materialized in cc_op; pick a direct
           comparison for the common masks, otherwise test (8 >> cc).  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
955
956 static void free_compare(DisasCompare *c)
957 {
958 if (!c->g1) {
959 if (c->is_64) {
960 tcg_temp_free_i64(c->u.s64.a);
961 } else {
962 tcg_temp_free_i32(c->u.s32.a);
963 }
964 }
965 if (!c->g2) {
966 if (c->is_64) {
967 tcg_temp_free_i64(c->u.s64.b);
968 } else {
969 tcg_temp_free_i32(c->u.s32.b);
970 }
971 }
972 }
973
/* ====================================================================== */
/* Define the insn format enumeration.  Each FN macro in insn-format.def
   contributes one FMT_<name> enumerator; the operand descriptions X1..X5
   are ignored here and used again below for format_info.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
993
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* Original (non-overlapping) field indices, used in presence bitmaps.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage slots; fields that never co-occur share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;      /* the undecoded instruction bytes */
    unsigned op:8;          /* primary opcode */
    unsigned op2:8;         /* secondary opcode, if any */
    unsigned presentC:16;   /* bitmap of occupied compact slots */
    unsigned int presentO;  /* bitmap of present original fields */
    int c[NUM_C_FIELD];     /* decoded field values, compact-indexed */
};
1062
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test whether original field C was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of field O from its compact slot C; the field must
   be present.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1078
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;          /* first bit of the field in the insn */
    unsigned int size:8;         /* width in bits */
    unsigned int type:2;         /* extraction type (unsigned/signed/shifted) */
    unsigned int indexC:6;       /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Shorthand field descriptors used inside insn-format.def.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* One layout entry per insn format, in the same order as DisasFormat.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1132
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* value is a global; do not free */
    TCGv_i64 out, out2, in1, in2;      /* operand values */
    TCGv_i64 addr1;                    /* effective address, when computed */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Bit flags; multiple constraints may be OR'ed together.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
1153
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1171
/* Architectural facility an instruction belongs to; used by the decoder
   to gate availability.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
1197
/* One entry of the instruction decode table: opcode, format, required
   facility, operand constraints, and the pipeline of helper callbacks
   that together translate the instruction.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (primary + secondary) */
    DisasFormat fmt:8;      /* field layout */
    DisasFacility fac:8;    /* facility gating availability */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;

    /* Translation pipeline: load inputs, prepare output, emit the op,
       write outputs, and compute the condition code.  Any may be NULL.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant passed to the helpers */
};
1215
1216 /* ====================================================================== */
1217 /* Miscellaneous helpers, used by several operations. */
1218
/* Compute the shift amount operand (base register b2 plus displacement d2,
   masked to the valid shift range) into o->in2.  With b2 == 0 the amount
   is the immediate displacement alone.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* No base register: the shift count is a compile-time constant.  */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1232
/* Emit an unconditional branch to a known destination, using goto_tb
   when the target is reachable by direct TB chaining, otherwise a plain
   PSW update and TB exit.  A branch-to-next is a no-op (except for PER).  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
1252
/* Emit a (possibly conditional) branch.  C is the evaluated condition;
   the destination is either PC-relative (is_imm, halfword offset imm)
   or the register/computed address cdest.  Chooses between direct TB
   chaining for both edges, chaining only the fallthru edge, or a
   movcond-based PSW update, depending on what use_goto_tb allows.
   Always consumes (frees) *c.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;   /* imm counts halfwords */
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* 32-bit compare: widen the setcond result so a single
               64-bit movcond can select the new PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1383
1384 /* ====================================================================== */
1385 /* The operations. These perform the bulk of the work for any insn,
1386 usually after the operands have been loaded and output initialized. */
1387
/* LOAD POSITIVE: out = |in2|, branch-free via movcond.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* out = (in2 < 0) ? -in2 : in2 */
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
1399
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1405
/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1411
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half,
   copy the low half unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1418
/* ADD: out = in1 + in2; CC is computed by the insn's cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1424
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + carry, where the incoming
   carry is extracted from the current (possibly dynamic) CC.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute in 32 bits, then zero-extend.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1451
/* ADD (short BFP): delegate to the softfloat helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1457
/* ADD (long BFP): delegate to the softfloat helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1463
/* ADD (extended BFP): 128-bit operands are passed as two 64-bit halves;
   the low half of the result comes back through the retxl slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1470
/* AND: out = in1 & in2; CC is computed by the insn's cout hook.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1476
/* AND IMMEDIATE (NIHH et al.): AND an immediate into one 16/32-bit slice
   of the register; insn->data packs (size << 8) | shift for the slice.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate and set all bits outside the slice so the
       AND leaves the rest of the register untouched.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1493
/* BRANCH AND SAVE: store the link information, then branch to the
   address in in2.  With in2 unused (R2 == 0) only the link is stored.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
1505
/* BRANCH RELATIVE AND SAVE: store the link, branch PC-relative
   (i2 is a halfword offset).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1511
/* BRANCH ON CONDITION: evaluate the m1 condition mask and branch either
   PC-relative (i2 present) or to the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1522
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write the decremented count back before evaluating the branch.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1546
/* BRANCH ON COUNT HIGH: decrement the high 32 bits of r1 and branch
   if the result is non-zero.  Destination is always PC-relative.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Operate on the high word of the register.  */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1570
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if the
   result is non-zero.  regs[r1] is global, hence g1 = true.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1589
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add the increment in r3 to r1,
   compare against the limit in r3|1, and branch on the comparison.
   insn->data selects high (GT) vs low-or-equal (LE).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is r3 for odd r3, else r3 + 1 (the odd register).  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1615
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as op_bx32 but on full
   registers, updating regs[r1] in place.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register, snapshot its value before
       the addition below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1641
/* COMPARE AND BRANCH (CRJ et al.): compare in1 with in2 under the m3
   condition mask and branch; insn->data selects unsigned compare.
   Without an immediate offset the target address comes from b4/d4.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1667
/* COMPARE (short BFP): helper computes the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1674
/* COMPARE (long BFP): helper computes the CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1681
/* COMPARE (extended BFP): 128-bit operands as high/low pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1688
/* CONVERT TO FIXED (short BFP -> 32-bit); m3 is the rounding mode.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1697
/* CONVERT TO FIXED (long BFP -> 32-bit); m3 is the rounding mode.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1706
/* CONVERT TO FIXED (extended BFP -> 32-bit); m3 is the rounding mode.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1715
/* CONVERT TO FIXED (short BFP -> 64-bit); m3 is the rounding mode.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1724
/* CONVERT TO FIXED (long BFP -> 64-bit); m3 is the rounding mode.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1733
/* CONVERT TO FIXED (extended BFP -> 64-bit); m3 is the rounding mode.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1742
/* CONVERT TO LOGICAL (short BFP -> uint32); m3 is the rounding mode.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1751
/* CONVERT TO LOGICAL (long BFP -> uint32); m3 is the rounding mode.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1760
/* CONVERT TO LOGICAL (extended BFP -> uint32); m3 is the rounding mode.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1769
/* CONVERT TO LOGICAL (short BFP -> uint64); m3 is the rounding mode.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1778
/* CONVERT TO LOGICAL (long BFP -> uint64); m3 is the rounding mode.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1787
/* CONVERT TO LOGICAL (extended BFP -> uint64); m3 is the rounding mode.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1796
/* CONVERT FROM FIXED (64-bit -> short BFP); m3 is the rounding mode.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1804
/* CONVERT FROM FIXED (64-bit -> long BFP); m3 is the rounding mode.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1812
/* CONVERT FROM FIXED (64-bit -> extended BFP); low result half comes
   back through the retxl slot.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1821
/* CONVERT FROM LOGICAL (uint64 -> short BFP); m3 is the rounding mode.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1829
/* CONVERT FROM LOGICAL (uint64 -> long BFP); m3 is the rounding mode.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1837
/* CONVERT FROM LOGICAL (uint64 -> extended BFP); low result half comes
   back through the retxl slot.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1846
/* CHECKSUM: the helper computes the checksum and the number of bytes
   processed; afterwards advance the address register r2 and decrement
   the length register r2+1 accordingly.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* The checksum itself is returned through the retxl slot.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1863
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 inline
   the two loads and compare via CC_OP_LTUGTU_64; otherwise call the
   byte-wise helper.  l1 holds length - 1.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: let the helper do the comparison and set
           the CC directly.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1897
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to the helper, which
   also updates the register pairs; CC comes back from the helper.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1909
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the register
   bytes selected by m3 against memory and sets the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1922
/* COMPARE LOGICAL STRING: regs[0] holds the terminator byte; the helper
   returns the final addresses (second one via the retxl slot).  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1931
/* COPY SIGN: out = magnitude of in2 with the sign bit of in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1941
/* COMPARE AND SWAP (CS/CSG): non-atomic load/compare/store sequence.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1991
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap as two
   64-bit halves; non-atomic load/compare/store sequence.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    /* cc = (mem != expected), i.e. the final CC value.  */
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* On equality store the new value, otherwise re-store the original
       memory contents (the store itself is unconditional).  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
2043
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper also purges the TLB.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
2055
/* CONVERT TO DECIMAL: helper converts the 32-bit value in in1 to packed
   decimal, which is then stored at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2067
/* COMPARE AND TRAP: trap if the m3 condition holds for in1 vs in2.
   The branch is on the inverted condition, skipping the trap.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        /* insn->data selects the unsigned (logical) variant.  */
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2086
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall interface; PSW and CC are synced
   first because the helper may inspect or alter machine state.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
2106
/* DIVIDE (signed 32-bit): helper yields remainder (out) and quotient
   (out2, via the retxl slot).  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2113
/* DIVIDE LOGICAL (unsigned 32-bit): helper yields remainder and
   quotient (the latter via the retxl slot).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2120
/* DIVIDE (signed 64-bit): helper yields remainder and quotient (the
   latter via the retxl slot).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2127
/* DIVIDE LOGICAL (128/64-bit): the 128-bit dividend is in out:out2;
   helper yields remainder and quotient (the latter via retxl).  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2134
/* DIVIDE (short BFP): delegate to the softfloat helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2140
/* DIVIDE (long BFP): delegate to the softfloat helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2146
/* DIVIDE (extended BFP): 128-bit operands as high/low pairs; the low
   result half comes back through the retxl slot.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2153
/* EXTRACT ACCESS REGISTER: load access register r2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
2160
/* EXTRACT CACHE ATTRIBUTE: we model no cache, so return all-ones.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
2167
/* EXTRACT FPC: load the floating-point control register into out.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2173
/* EXTRACT PSW: store the high PSW-mask word into r1 and, if r2 != 0,
   the low word into r2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
2191
/* EXECUTE: run the target instruction with bits 8-15 OR'ed from in1,
   implemented entirely in the helper.  PSW and CC are synced first
   because the executed insn runs outside normal translation.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
2216
/* LOAD FP INTEGER (short BFP): round to integer; m3 is the rounding
   mode.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2224
/* LOAD FP INTEGER (long BFP): round to integer; m3 is the rounding
   mode.  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2232
/* LOAD FP INTEGER (extended BFP): round to integer; the low result
   half comes back through the retxl slot.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2241
/* FIND LEFTMOST ONE: R1 = number of leading zeros (64 if in2 == 0),
   R1+1 = in2 with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2261
2262 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2263 {
2264 int m3 = get_field(s->fields, m3);
2265 int pos, len, base = s->insn->data;
2266 TCGv_i64 tmp = tcg_temp_new_i64();
2267 uint64_t ccm;
2268
2269 switch (m3) {
2270 case 0xf:
2271 /* Effectively a 32-bit load. */
2272 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2273 len = 32;
2274 goto one_insert;
2275
2276 case 0xc:
2277 case 0x6:
2278 case 0x3:
2279 /* Effectively a 16-bit load. */
2280 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2281 len = 16;
2282 goto one_insert;
2283
2284 case 0x8:
2285 case 0x4:
2286 case 0x2:
2287 case 0x1:
2288 /* Effectively an 8-bit load. */
2289 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2290 len = 8;
2291 goto one_insert;
2292
2293 one_insert:
2294 pos = base + ctz32(m3) * 8;
2295 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2296 ccm = ((1ull << len) - 1) << pos;
2297 break;
2298
2299 default:
2300 /* This is going to be a sequence of loads and inserts. */
2301 pos = base + 32 - 8;
2302 ccm = 0;
2303 while (m3) {
2304 if (m3 & 0x8) {
2305 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2306 tcg_gen_addi_i64(o->in2, o->in2, 1);
2307 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2308 ccm |= 0xff << pos;
2309 }
2310 m3 = (m3 << 1) & 0xf;
2311 pos -= 8;
2312 }
2313 break;
2314 }
2315
2316 tcg_gen_movi_i64(tmp, ccm);
2317 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2318 tcg_temp_free_i64(tmp);
2319 return NO_EXIT;
2320 }
2321
2322 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2323 {
2324 int shift = s->insn->data & 0xff;
2325 int size = s->insn->data >> 8;
2326 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2327 return NO_EXIT;
2328 }
2329
/* IPM (INSERT PROGRAM MASK): assemble the CC and the PSW program
   mask into byte 4 of the output register, preserving all other
   bytes. */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    /* Make sure cc_op reflects the current condition code. */
    gen_op_calc_cc(s);
    /* Clear the destination byte (bits 24-31 of the low word). */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Move the PSW program-mask bits down into position: the two
       shifts net to a right shift by 16 while discarding everything
       above the mask field. */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the two condition-code bits just above the mask. */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2348
#ifndef CONFIG_USER_ONLY
/* IPTE (INVALIDATE PAGE TABLE ENTRY): privileged; the helper does
   the PTE invalidation and any TLB flushing. */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ISKE (INSERT STORAGE KEY EXTENDED): privileged; fetch the storage
   key for the address in in2 via the helper. */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2364
/* LDEBR: lengthen 32-bit BFP (in2) to 64-bit. */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LEDBR: round 64-bit BFP (in2) to 32-bit. */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LDXBR: round 128-bit BFP (in1:in2) to 64-bit. */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LEXBR: round 128-bit BFP (in1:in2) to 32-bit. */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LXDBR: lengthen 64-bit BFP (in2) to 128-bit; low half of the
   result comes back through return_low128. */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LXEBR: lengthen 32-bit BFP (in2) to 128-bit. */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2402
/* LLGT-style extraction: keep only the low 31 bits of in2 (clear the
   high 33, including bit 32). */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2408
/* Memory loads of 8/16/32/64 bits from the address in in2, sign- or
   zero-extended into the 64-bit output as the name indicates. */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2450
/* LAT (LOAD AND TRAP, 32-bit): store the already-loaded value into
   the low half of r1, then trap if it was zero. */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2461
/* LGAT (LOAD AND TRAP, 64-bit): load a doubleword and trap if it is
   zero. */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LFHAT: store the loaded word into the HIGH half of r1, then trap
   if it was zero. */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LLGFAT: zero-extended 32-bit load, trapping on a zero result. */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LLGTAT: keep the low 31 bits of in2, trapping on a zero result. */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2505
/* LOAD ON CONDITION: if the condition selected by M3 holds, the
   output is in2, otherwise the previous value in1 is kept.  Emitted
   branch-free with movcond. */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison operands: materialize the condition as a
           boolean, widen it to 64 bits, and select against zero. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2535
#ifndef CONFIG_USER_ONLY
/* LCTL (LOAD CONTROL, 32-bit): privileged; the helper loads control
   registers r1 through r3 from memory at in2. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LCTLG (LOAD CONTROL, 64-bit): as op_lctl but with doubleword
   operands. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LRA (LOAD REAL ADDRESS): privileged; the helper performs the
   translation and sets the CC, which we latch with set_cc_static. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2568
/* LPSW (LOAD PSW, short format): privileged; read two 32-bit words
   from in2, widen the first into a 64-bit PSW mask, and hand both to
   the load_psw helper.  Control flow does not return to this TB. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2588
/* LPSWE (LOAD PSW EXTENDED): privileged; read the full 128-bit PSW
   (mask, then address) as two doublewords and load it via the
   helper.  Control flow does not return to this TB. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2607
/* LAM (LOAD ACCESS MULTIPLE): the helper loads access registers r1
   through r3 from memory at in2. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2618
/* LM, LMY (LOAD MULTIPLE, 32-bit): load the low halves of registers
   r1 through r3 (wrapping 15 -> 0) from consecutive words at in2.
   The first and last slots are loaded before any register is
   written so that a page fault midway cannot leave a partial
   update. */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2664
/* LMH (LOAD MULTIPLE HIGH): as op_lm32, but the loaded words go into
   the HIGH halves of registers r1 through r3.  The same
   fault-before-write ordering applies. */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2710
/* LMG (LOAD MULTIPLE, 64-bit): load registers r1 through r3
   (wrapping 15 -> 0) from consecutive doublewords at in2.  The
   first and last slots are loaded before any register is modified
   so a fault midway cannot leave a partial update; note the last
   register (r3) is loaded directly since its old value is not
   needed once t1 holds the first slot. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
2751
#ifndef CONFIG_USER_ONLY
/* LURA (LOAD USING REAL ADDRESS, 32-bit): privileged; the helper
   reads from the real (untranslated) address in in2. */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LURAG (LOAD USING REAL ADDRESS, 64-bit): as op_lura, doubleword. */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2769
/* Generic move: hand the in2 temporary over to the output instead of
   copying it, and clear in2 so the common cleanup code does not free
   it twice.  The g_* flags track whether the temp is a global. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2778
/* Move in2 to the output (transferring the temporary, as op_mov2)
   and additionally set access register 1 according to the current
   PSW address-space control.  NOTE(review): which insn uses this is
   determined by the insn table, not visible here. */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        /* Primary space mode: AR1 = 0. */
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        /* Access-register mode: AR1 = 1. */
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        /* Secondary space mode: copy the access register named by B2,
           or 0 when B2 designates register 0. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        /* Home space mode: AR1 = 2. */
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
2813
/* Move a register pair: transfer both in1 and in2 temporaries to the
   two output slots (no copy), clearing the inputs so the common
   cleanup code does not free them twice. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2825
/* MVC (MOVE CHARACTER): the helper copies l1+1 bytes from in2 to
   addr1. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2834
/* MVCL (MOVE LONG): operands are the register pairs named by r1 and
   r2; the helper does the move and returns the CC. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2846
/* MVCLE (MOVE LONG EXTENDED): register pairs r1 and r3, padding
   character in in2; the helper does the move and returns the CC. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2858
#ifndef CONFIG_USER_ONLY
/* MVCP (MOVE TO PRIMARY): privileged cross-space move; the length
   comes from the register named by the L1 field. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MVCS (MOVE TO SECONDARY): as op_mvcp, opposite direction. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
2880
/* MVPG (MOVE PAGE): the helper receives R0 (which carries the
   instruction's option bits -- presumably; confirm against the PoO)
   plus the two page addresses, and sets the CC. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2888
/* MVST (MOVE STRING): R0 holds the terminating character.  The
   helper returns the updated first-operand address into in1 (wired
   to r1 by the insn table) and the updated second-operand address
   through return_low128 into in2. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2897
/* Integer multiply, low 64 bits only. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Full 64x64 -> 128-bit unsigned multiply: low half in out2, high
   half in out. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
2909
/* MEEBR: 32-bit BFP multiply. */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MDEBR: multiply two 32-bit BFP values producing a 64-bit result. */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MDBR: 64-bit BFP multiply. */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MXBR: 128-bit BFP multiply (out:out2) * (in1:in2); low half of the
   result comes back through return_low128. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MXDBR: multiply a 128-bit BFP value (out:out2) by a 64-bit one
   (in2), producing a 128-bit result. */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2941
/* MAEBR: 32-bit BFP multiply-and-add.  The third operand is loaded
   from float register r3 as a 32-bit value into a fresh temp. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MADBR: 64-bit BFP multiply-and-add; the third operand is the
   float register itself (a global, not freed). */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MSEBR: 32-bit BFP multiply-and-subtract. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MSDBR: 64-bit BFP multiply-and-subtract. */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2971
2972 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2973 {
2974 TCGv_i64 z, n;
2975 z = tcg_const_i64(0);
2976 n = tcg_temp_new_i64();
2977 tcg_gen_neg_i64(n, o->in2);
2978 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2979 tcg_temp_free_i64(n);
2980 tcg_temp_free_i64(z);
2981 return NO_EXIT;
2982 }
2983
/* LOAD NEGATIVE (BFP): force the sign bit on, i.e. out = -|in2|,
   without inspecting the value.  32-bit format. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* As above for the 64-bit format. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* As above for the 128-bit format: the sign lives in the high half
   (in1); the low half passes through unchanged. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3002
/* NC (AND CHARACTER): the helper ANDs l1+1 bytes at in2 into addr1
   and returns the CC. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3012
/* Two's-complement negation. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* BFP negation, 32-bit format: flip the sign bit. */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* BFP negation, 64-bit format. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* BFP negation, 128-bit format: flip the sign in the high half
   (in1); the low half passes through unchanged. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3037
/* OC (OR CHARACTER): the helper ORs l1+1 bytes at in2 into addr1 and
   returns the CC. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3047
/* Bitwise OR of the two inputs. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3053
/* OR-immediate into a sub-field of the register.  The insn data word
   encodes the field: low byte is the shift (lsb position), the rest
   the field size.  in2 holds the immediate and is modified in place
   (hence the assert that it is not a global). */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3069
/* POPCNT: population count, delegated to the helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3075
#ifndef CONFIG_USER_ONLY
/* PTLB (PURGE TLB): privileged; the helper flushes the TLB. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
3084
/* RISBG, RISBHG, RISBLG (ROTATE THEN INSERT SELECTED BITS): rotate
   the second operand left by I5 and insert the bit range selected by
   I3..I4 into the first operand.  The 0x80 bit of I4 requests
   zeroing the unselected bits instead of preserving them.  The H/L
   variants operate only on the high/low word (PMASK). */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the
       high/low insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            /* risbhg: positions were masked to 0..31 above; shift them
               back into the high word for the deposit. */
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3167
3168 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3169 {
3170 int i3 = get_field(s->fields, i3);
3171 int i4 = get_field(s->fields, i4);
3172 int i5 = get_field(s->fields, i5);
3173 uint64_t mask;
3174
3175 /* If this is a test-only form, arrange to discard the result. */
3176 if (i3 & 0x80) {
3177 o->out = tcg_temp_new_i64();
3178 o->g_out = false;
3179 }
3180
3181 i3 &= 63;
3182 i4 &= 63;
3183 i5 &= 63;
3184
3185 /* MASK is the set of bits to be operated on from R2.
3186 Take care for I3/I4 wraparound. */
3187 mask = ~0ull >> i3;
3188 if (i3 <= i4) {
3189 mask ^= ~0ull >> i4 >> 1;
3190 } else {
3191 mask |= ~(~0ull >> i4 >> 1);
3192 }
3193
3194 /* Rotate the input as necessary. */
3195 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3196
3197 /* Operate. */
3198 switch (s->fields->op2) {
3199 case 0x55: /* AND */
3200 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3201 tcg_gen_and_i64(o->out, o->out, o->in2);
3202 break;
3203 case 0x56: /* OR */
3204 tcg_gen_andi_i64(o->in2, o->in2, mask);
3205 tcg_gen_or_i64(o->out, o->out, o->in2);
3206 break;
3207 case 0x57: /* XOR */
3208 tcg_gen_andi_i64(o->in2, o->in2, mask);
3209 tcg_gen_xor_i64(o->out, o->out, o->in2);
3210 break;
3211 default:
3212 abort();