s390x/tcg: Implement MULTIPLY SINGLE (MSC, MSGC, MSGRKC, MSRKC)
target/s390x/translate.c
/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (almost) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields. We'll store each inside
 * an array indexed by an enum. In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact. For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

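/*
 * Compute the link information for branch-and-save style instructions:
 * the complete address in 64-bit mode; otherwise the address, with the
 * leftmost bit of the low word set in 31-bit mode, deposited into the
 * low 32 bits of OUT while the high half is left intact.
 */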
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

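/*
 * Record a potential PER branch event (system mode only): gbea always
 * tracks the breaking-event address, while the per_branch helper is
 * invoked only when PER is enabled for this TB.
 */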
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

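/* Select the MMU index from the DAT-enable and address-space-control bits. */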
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early, we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

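/*
 * goto_tb is usable only when nothing forces an exit to the main loop
 * (use_exit_tb) and, in system mode, when the destination lies on the
 * same guest page as the start of the TB or the current instruction.
 */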
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

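/*
 * Release any temporaries held by a DisasCompare; operands marked
 * g1/g2 are globals and must not be freed.
 */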
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back. See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated. To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0. To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB. */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB. */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb. No fixup required. */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values. */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed. */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop. */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */

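/*
 * Load the shift count for a shift instruction into in2: the immediate
 * displacement when no base register is given, otherwise the computed
 * address; masked to the valid bit range (MASK) either way.
 */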
static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

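/*
 * Emit a conditional branch, choosing among three strategies: goto_tb on
 * both exits when the taken target is an immediate in goto_tb range,
 * goto_tb on the fallthru exit only, or a movcond on psw_addr when
 * chaining is not possible at all.
 */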
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations. These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized. */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3. Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

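/*
 * ADD IMMEDIATE to storage (ASI and friends): a plain load/add/store
 * without the facility behind STFLE bit 45 (which includes
 * interlocked-access facility 1), an atomic fetch-add with it.
 */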
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

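/*
 * AND to storage: a plain load/and/store without interlocked-access
 * facility 2, an atomic fetch-and with it; the result is recomputed
 * either way to set the CC.
 */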
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

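/*
 * Build the BAL link information. In 31- and 64-bit mode this matches
 * pc_to_link_info; in 24-bit mode the low word holds the ILC in bits
 * 0-1, the CC in bits 2-3, the program mask in bits 4-7 and the
 * instruction address in bits 8-31.
 */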
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

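/*
 * Extract and validate the m3 (rounding mode) and m4 fields of a
 * floating-point instruction, packing them into a single i32 (m3 in
 * bits 0-3, m4 in bits 4-7). Returns NULL after raising a
 * specification exception for an invalid rounding mode.
 */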
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

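/*
 * CHECKSUM. The helper returns the number of bytes processed, which is
 * then used to advance the r2 address and decrement the r2+1 length.
 */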
2056 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2057 {
2058 int r2 = get_field(s, r2);
2059 TCGv_i64 len = tcg_temp_new_i64();
2060
2061 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2062 set_cc_static(s);
2063 return_low128(o->out);
2064
2065 tcg_gen_add_i64(regs[r2], regs[r2], len);
2066 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2067 tcg_temp_free_i64(len);
2068
2069 return DISAS_NEXT;
2070 }
2071
2072 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2073 {
2074 int l = get_field(s, l1);
2075 TCGv_i32 vl;
2076
2077 switch (l + 1) {
2078 case 1:
2079 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2080 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2081 break;
2082 case 2:
2083 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2084 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2085 break;
2086 case 4:
2087 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2088 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2089 break;
2090 case 8:
2091 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2092 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2093 break;
2094 default:
2095 vl = tcg_const_i32(l);
2096 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2097 tcg_temp_free_i32(vl);
2098 set_cc_static(s);
2099 return DISAS_NEXT;
2100 }
2101 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2102 return DISAS_NEXT;
2103 }
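/*
 * The l field encodes length - 1, hence the switch on l + 1: the four
 * power-of-two lengths 1, 2, 4 and 8 are inlined as plain loads, and
 * everything else falls back to the helper.  The inline path feeds
 * CC_OP_LTUGTU_64, which can be modeled on the host as (a sketch):
 *
 *     // CC for CLC: 0 equal, 1 first operand low, 2 first operand high
 *     static int clc_cc(uint64_t op1, uint64_t op2)
 *     {
 *         return op1 == op2 ? 0 : (op1 < op2 ? 1 : 2);
 *     }
 */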
2104
2105 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2106 {
2107 int r1 = get_field(s, r1);
2108 int r2 = get_field(s, r2);
2109 TCGv_i32 t1, t2;
2110
2111 /* r1 and r2 must be even. */
2112 if (r1 & 1 || r2 & 1) {
2113 gen_program_exception(s, PGM_SPECIFICATION);
2114 return DISAS_NORETURN;
2115 }
2116
2117 t1 = tcg_const_i32(r1);
2118 t2 = tcg_const_i32(r2);
2119 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2120 tcg_temp_free_i32(t1);
2121 tcg_temp_free_i32(t2);
2122 set_cc_static(s);
2123 return DISAS_NEXT;
2124 }
2125
2126 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2127 {
2128 int r1 = get_field(s, r1);
2129 int r3 = get_field(s, r3);
2130 TCGv_i32 t1, t3;
2131
2132 /* r1 and r3 must be even. */
2133 if (r1 & 1 || r3 & 1) {
2134 gen_program_exception(s, PGM_SPECIFICATION);
2135 return DISAS_NORETURN;
2136 }
2137
2138 t1 = tcg_const_i32(r1);
2139 t3 = tcg_const_i32(r3);
2140 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2141 tcg_temp_free_i32(t1);
2142 tcg_temp_free_i32(t3);
2143 set_cc_static(s);
2144 return DISAS_NEXT;
2145 }
2146
2147 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2148 {
2149 int r1 = get_field(s, r1);
2150 int r3 = get_field(s, r3);
2151 TCGv_i32 t1, t3;
2152
2153 /* r1 and r3 must be even. */
2154 if (r1 & 1 || r3 & 1) {
2155 gen_program_exception(s, PGM_SPECIFICATION);
2156 return DISAS_NORETURN;
2157 }
2158
2159 t1 = tcg_const_i32(r1);
2160 t3 = tcg_const_i32(r3);
2161 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2162 tcg_temp_free_i32(t1);
2163 tcg_temp_free_i32(t3);
2164 set_cc_static(s);
2165 return DISAS_NEXT;
2166 }
2167
2168 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2169 {
2170 TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2171 TCGv_i32 t1 = tcg_temp_new_i32();
2172 tcg_gen_extrl_i64_i32(t1, o->in1);
2173 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2174 set_cc_static(s);
2175 tcg_temp_free_i32(t1);
2176 tcg_temp_free_i32(m3);
2177 return DISAS_NEXT;
2178 }
2179
2180 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2181 {
2182 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2183 set_cc_static(s);
2184 return_low128(o->in2);
2185 return DISAS_NEXT;
2186 }
2187
2188 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2189 {
2190 TCGv_i64 t = tcg_temp_new_i64();
2191 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2192 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2193 tcg_gen_or_i64(o->out, o->out, t);
2194 tcg_temp_free_i64(t);
2195 return DISAS_NEXT;
2196 }
2197
2198 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2199 {
2200 int d2 = get_field(s, d2);
2201 int b2 = get_field(s, b2);
2202 TCGv_i64 addr, cc;
2203
2204 /* Note that in1 = R3 (new value) and
2205 in2 = (zero-extended) R1 (expected value). */
2206
2207 addr = get_address(s, 0, b2, d2);
2208 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2209 get_mem_index(s), s->insn->data | MO_ALIGN);
2210 tcg_temp_free_i64(addr);
2211
2212 /* Are the memory and expected values (un)equal? Note that this setcond
2213 produces the output CC value, thus the NE sense of the test. */
2214 cc = tcg_temp_new_i64();
2215 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2216 tcg_gen_extrl_i64_i32(cc_op, cc);
2217 tcg_temp_free_i64(cc);
2218 set_cc_static(s);
2219
2220 return DISAS_NEXT;
2221 }
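/*
 * Ignoring atomicity, the COMPARE AND SWAP sequence above behaves like
 * this host-side sketch; returning the NE comparison directly yields the
 * architected CC (0: swapped, 1: mismatch):
 *
 *     static int cs_model(uint64_t *mem, uint64_t expect, uint64_t newv,
 *                         uint64_t *r1_out)
 *     {
 *         uint64_t old = *mem;
 *
 *         if (old == expect) {
 *             *mem = newv;
 *         }
 *         *r1_out = old;          // R1 always receives the old value
 *         return old != expect;   // the NE sense noted above
 *     }
 */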
2222
2223 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2224 {
2225 int r1 = get_field(s, r1);
2226 int r3 = get_field(s, r3);
2227 int d2 = get_field(s, d2);
2228 int b2 = get_field(s, b2);
2229 DisasJumpType ret = DISAS_NEXT;
2230 TCGv_i64 addr;
2231 TCGv_i32 t_r1, t_r3;
2232
2233 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2234 addr = get_address(s, 0, b2, d2);
2235 t_r1 = tcg_const_i32(r1);
2236 t_r3 = tcg_const_i32(r3);
2237 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2238 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2239 } else if (HAVE_CMPXCHG128) {
2240 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2241 } else {
2242 gen_helper_exit_atomic(cpu_env);
2243 ret = DISAS_NORETURN;
2244 }
2245 tcg_temp_free_i64(addr);
2246 tcg_temp_free_i32(t_r1);
2247 tcg_temp_free_i32(t_r3);
2248
2249 set_cc_static(s);
2250 return ret;
2251 }
2252
2253 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2254 {
2255 int r3 = get_field(s, r3);
2256 TCGv_i32 t_r3 = tcg_const_i32(r3);
2257
2258 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2259 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2260 } else {
2261 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2262 }
2263 tcg_temp_free_i32(t_r3);
2264
2265 set_cc_static(s);
2266 return DISAS_NEXT;
2267 }
2268
2269 #ifndef CONFIG_USER_ONLY
2270 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2271 {
2272 MemOp mop = s->insn->data;
2273 TCGv_i64 addr, old, cc;
2274 TCGLabel *lab = gen_new_label();
2275
2276 /* Note that in1 = R1 (zero-extended expected value),
2277 out = R1 (original reg), out2 = R1+1 (new value). */
2278
2279 addr = tcg_temp_new_i64();
2280 old = tcg_temp_new_i64();
2281 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2282 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2283 get_mem_index(s), mop | MO_ALIGN);
2284 tcg_temp_free_i64(addr);
2285
2286 /* Are the memory and expected values (un)equal? */
2287 cc = tcg_temp_new_i64();
2288 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2289 tcg_gen_extrl_i64_i32(cc_op, cc);
2290
2291     /* Write back the output now, so that it happens before the
2292        following branch and we don't need local temps. */
2293 if ((mop & MO_SIZE) == MO_32) {
2294 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2295 } else {
2296 tcg_gen_mov_i64(o->out, old);
2297 }
2298 tcg_temp_free_i64(old);
2299
2300 /* If the comparison was equal, and the LSB of R2 was set,
2301 then we need to flush the TLB (for all cpus). */
2302 tcg_gen_xori_i64(cc, cc, 1);
2303 tcg_gen_and_i64(cc, cc, o->in2);
2304 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2305 tcg_temp_free_i64(cc);
2306
2307 gen_helper_purge(cpu_env);
2308 gen_set_label(lab);
2309
2310 return DISAS_NEXT;
2311 }
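/*
 * The xori/and pair above encodes the purge condition without a second
 * branch: with cc holding the NE result, purge iff the compare succeeded
 * and the low bit of R2 is set.  Equivalently (sketch):
 *
 *     static bool csp_should_purge(uint64_t cc_ne, uint64_t r2)
 *     {
 *         return ((cc_ne ^ 1) & r2 & 1) != 0;
 *     }
 */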
2312 #endif
2313
2314 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2315 {
2316 TCGv_i64 t1 = tcg_temp_new_i64();
2317 TCGv_i32 t2 = tcg_temp_new_i32();
2318 tcg_gen_extrl_i64_i32(t2, o->in1);
2319 gen_helper_cvd(t1, t2);
2320 tcg_temp_free_i32(t2);
2321 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2322 tcg_temp_free_i64(t1);
2323 return DISAS_NEXT;
2324 }
2325
2326 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2327 {
2328 int m3 = get_field(s, m3);
2329 TCGLabel *lab = gen_new_label();
2330 TCGCond c;
2331
2332 c = tcg_invert_cond(ltgt_cond[m3]);
2333 if (s->insn->data) {
2334 c = tcg_unsigned_cond(c);
2335 }
2336 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2337
2338 /* Trap. */
2339 gen_trap(s);
2340
2341 gen_set_label(lab);
2342 return DISAS_NEXT;
2343 }
2344
2345 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2346 {
2347 int m3 = get_field(s, m3);
2348 int r1 = get_field(s, r1);
2349 int r2 = get_field(s, r2);
2350 TCGv_i32 tr1, tr2, chk;
2351
2352 /* R1 and R2 must both be even. */
2353 if ((r1 | r2) & 1) {
2354 gen_program_exception(s, PGM_SPECIFICATION);
2355 return DISAS_NORETURN;
2356 }
2357 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2358 m3 = 0;
2359 }
2360
2361 tr1 = tcg_const_i32(r1);
2362 tr2 = tcg_const_i32(r2);
2363 chk = tcg_const_i32(m3);
2364
2365 switch (s->insn->data) {
2366 case 12:
2367 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2368 break;
2369 case 14:
2370 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2371 break;
2372 case 21:
2373 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2374 break;
2375 case 24:
2376 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2377 break;
2378 case 41:
2379 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2380 break;
2381 case 42:
2382 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2383 break;
2384 default:
2385 g_assert_not_reached();
2386 }
2387
2388 tcg_temp_free_i32(tr1);
2389 tcg_temp_free_i32(tr2);
2390 tcg_temp_free_i32(chk);
2391 set_cc_static(s);
2392 return DISAS_NEXT;
2393 }
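/*
 * The insn->data values are mnemonic: each decimal digit names a Unicode
 * format (1 = UTF-8, 2 = UTF-16, 4 = UTF-32), source digit first, so 21
 * is CU21, UTF-16 to UTF-8.  A sketch of the decoding:
 *
 *     static void cu_formats(int data, int *src, int *dst)
 *     {
 *         *src = data / 10;   // e.g. 24 -> UTF-16 source
 *         *dst = data % 10;   //         -> UTF-32 destination
 *     }
 */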
2394
2395 #ifndef CONFIG_USER_ONLY
2396 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2397 {
2398 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2399 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2400 TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2401
2402 gen_helper_diag(cpu_env, r1, r3, func_code);
2403
2404 tcg_temp_free_i32(func_code);
2405 tcg_temp_free_i32(r3);
2406 tcg_temp_free_i32(r1);
2407 return DISAS_NEXT;
2408 }
2409 #endif
2410
2411 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2412 {
2413 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2414 return_low128(o->out);
2415 return DISAS_NEXT;
2416 }
2417
2418 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2419 {
2420 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2421 return_low128(o->out);
2422 return DISAS_NEXT;
2423 }
2424
2425 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2426 {
2427 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2428 return_low128(o->out);
2429 return DISAS_NEXT;
2430 }
2431
2432 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2433 {
2434 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2435 return_low128(o->out);
2436 return DISAS_NEXT;
2437 }
2438
2439 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2440 {
2441 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2442 return DISAS_NEXT;
2443 }
2444
2445 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2446 {
2447 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2448 return DISAS_NEXT;
2449 }
2450
2451 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2452 {
2453 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2454 return_low128(o->out2);
2455 return DISAS_NEXT;
2456 }
2457
2458 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2459 {
2460 int r2 = get_field(s, r2);
2461 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2462 return DISAS_NEXT;
2463 }
2464
2465 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2466 {
2467 /* No cache information provided. */
2468 tcg_gen_movi_i64(o->out, -1);
2469 return DISAS_NEXT;
2470 }
2471
2472 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2473 {
2474 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2475 return DISAS_NEXT;
2476 }
2477
2478 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2479 {
2480 int r1 = get_field(s, r1);
2481 int r2 = get_field(s, r2);
2482 TCGv_i64 t = tcg_temp_new_i64();
2483
2484 /* Note the "subsequently" in the PoO, which implies a defined result
2485 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2486 tcg_gen_shri_i64(t, psw_mask, 32);
2487 store_reg32_i64(r1, t);
2488 if (r2 != 0) {
2489 store_reg32_i64(r2, psw_mask);
2490 }
2491
2492 tcg_temp_free_i64(t);
2493 return DISAS_NEXT;
2494 }
2495
2496 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2497 {
2498 int r1 = get_field(s, r1);
2499 TCGv_i32 ilen;
2500 TCGv_i64 v1;
2501
2502 /* Nested EXECUTE is not allowed. */
2503 if (unlikely(s->ex_value)) {
2504 gen_program_exception(s, PGM_EXECUTE);
2505 return DISAS_NORETURN;
2506 }
2507
2508 update_psw_addr(s);
2509 update_cc_op(s);
2510
2511 if (r1 == 0) {
2512 v1 = tcg_const_i64(0);
2513 } else {
2514 v1 = regs[r1];
2515 }
2516
2517 ilen = tcg_const_i32(s->ilen);
2518 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2519 tcg_temp_free_i32(ilen);
2520
2521 if (r1 == 0) {
2522 tcg_temp_free_i64(v1);
2523 }
2524
2525 return DISAS_PC_CC_UPDATED;
2526 }
2527
2528 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2529 {
2530 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2531
2532 if (!m34) {
2533 return DISAS_NORETURN;
2534 }
2535 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2536 tcg_temp_free_i32(m34);
2537 return DISAS_NEXT;
2538 }
2539
2540 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2541 {
2542 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2543
2544 if (!m34) {
2545 return DISAS_NORETURN;
2546 }
2547 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2548 tcg_temp_free_i32(m34);
2549 return DISAS_NEXT;
2550 }
2551
2552 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2553 {
2554 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2555
2556 if (!m34) {
2557 return DISAS_NORETURN;
2558 }
2559 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2560 return_low128(o->out2);
2561 tcg_temp_free_i32(m34);
2562 return DISAS_NEXT;
2563 }
2564
2565 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2566 {
2567 /* We'll use the original input for cc computation, since we get to
2568 compare that against 0, which ought to be better than comparing
2569 the real output against 64. It also lets cc_dst be a convenient
2570 temporary during our computation. */
2571 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2572
2573 /* R1 = IN ? CLZ(IN) : 64. */
2574 tcg_gen_clzi_i64(o->out, o->in2, 64);
2575
2576 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2577 value by 64, which is undefined. But since the shift is 64 iff the
2578 input is zero, we still get the correct result after and'ing. */
2579 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2580 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2581 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2582 return DISAS_NEXT;
2583 }
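/*
 * A host-side model of FLOGR (FIND LEFTMOST ONE), as a sketch; bits are
 * numbered from the MSB as in the PoO, so the found position is simply
 * the count of leading zeros:
 *
 *     static void flogr_model(uint64_t in, uint64_t *r1, uint64_t *r1p1)
 *     {
 *         if (in == 0) {
 *             *r1 = 64;
 *             *r1p1 = 0;
 *         } else {
 *             *r1 = __builtin_clzll(in);
 *             *r1p1 = in & ~(0x8000000000000000ull >> *r1);
 *         }
 *     }
 */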
2584
2585 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2586 {
2587 int m3 = get_field(s, m3);
2588 int pos, len, base = s->insn->data;
2589 TCGv_i64 tmp = tcg_temp_new_i64();
2590 uint64_t ccm;
2591
2592 switch (m3) {
2593 case 0xf:
2594 /* Effectively a 32-bit load. */
2595 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2596 len = 32;
2597 goto one_insert;
2598
2599 case 0xc:
2600 case 0x6:
2601 case 0x3:
2602 /* Effectively a 16-bit load. */
2603 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2604 len = 16;
2605 goto one_insert;
2606
2607 case 0x8:
2608 case 0x4:
2609 case 0x2:
2610 case 0x1:
2611 /* Effectively an 8-bit load. */
2612 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2613 len = 8;
2614 goto one_insert;
2615
2616 one_insert:
2617 pos = base + ctz32(m3) * 8;
2618 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2619 ccm = ((1ull << len) - 1) << pos;
2620 break;
2621
2622 default:
2623 /* This is going to be a sequence of loads and inserts. */
2624 pos = base + 32 - 8;
2625 ccm = 0;
2626 while (m3) {
2627 if (m3 & 0x8) {
2628 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2629 tcg_gen_addi_i64(o->in2, o->in2, 1);
2630 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2631                 ccm |= 0xffull << pos;
2632 }
2633 m3 = (m3 << 1) & 0xf;
2634 pos -= 8;
2635 }
2636 break;
2637 }
2638
2639 tcg_gen_movi_i64(tmp, ccm);
2640 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2641 tcg_temp_free_i64(tmp);
2642 return DISAS_NEXT;
2643 }
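/*
 * The CC mask simply marks which bits of R1 were replaced.  The loop in
 * the default case computes it like this host-side sketch (which also
 * covers the contiguous fast-path masks):
 *
 *     static uint64_t icm_ccm(int m3, int base)
 *     {
 *         uint64_t ccm = 0;
 *         int pos = base + 32 - 8;
 *
 *         for (; m3; m3 = (m3 << 1) & 0xf, pos -= 8) {
 *             if (m3 & 0x8) {
 *                 ccm |= 0xffull << pos;
 *             }
 *         }
 *         return ccm;
 *     }
 */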
2644
2645 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2646 {
2647 int shift = s->insn->data & 0xff;
2648 int size = s->insn->data >> 8;
2649 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2650 return DISAS_NEXT;
2651 }
2652
2653 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2654 {
2655 TCGv_i64 t1, t2;
2656
2657 gen_op_calc_cc(s);
2658 t1 = tcg_temp_new_i64();
2659 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2660 t2 = tcg_temp_new_i64();
2661 tcg_gen_extu_i32_i64(t2, cc_op);
2662 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2663 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2664 tcg_temp_free_i64(t1);
2665 tcg_temp_free_i64(t2);
2666 return DISAS_NEXT;
2667 }
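/*
 * The two deposits assemble (cc << 4) | program_mask and drop that byte
 * into PoO bits 32-39 of R1 (bits 24-31 counting from the LSB); e.g.
 * cc = 2 with a program mask of 0x3 inserts 0x23.  As a sketch:
 *
 *     static uint64_t ipm_model(uint64_t r1, unsigned cc, unsigned pm)
 *     {
 *         uint64_t byte = (cc << 4) | pm;
 *
 *         return (r1 & ~0xff000000ull) | (byte << 24);
 *     }
 */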
2668
2669 #ifndef CONFIG_USER_ONLY
2670 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2671 {
2672 TCGv_i32 m4;
2673
2674 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2675 m4 = tcg_const_i32(get_field(s, m4));
2676 } else {
2677 m4 = tcg_const_i32(0);
2678 }
2679 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2680 tcg_temp_free_i32(m4);
2681 return DISAS_NEXT;
2682 }
2683
2684 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2685 {
2686 TCGv_i32 m4;
2687
2688 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2689 m4 = tcg_const_i32(get_field(s, m4));
2690 } else {
2691 m4 = tcg_const_i32(0);
2692 }
2693 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2694 tcg_temp_free_i32(m4);
2695 return DISAS_NEXT;
2696 }
2697
2698 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2699 {
2700 gen_helper_iske(o->out, cpu_env, o->in2);
2701 return DISAS_NEXT;
2702 }
2703 #endif
2704
2705 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2706 {
2707 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2708 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2709 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2710 TCGv_i32 t_r1, t_r2, t_r3, type;
2711
2712 switch (s->insn->data) {
2713 case S390_FEAT_TYPE_KMCTR:
2714 if (r3 & 1 || !r3) {
2715 gen_program_exception(s, PGM_SPECIFICATION);
2716 return DISAS_NORETURN;
2717 }
2718 /* FALL THROUGH */
2719 case S390_FEAT_TYPE_PPNO:
2720 case S390_FEAT_TYPE_KMF:
2721 case S390_FEAT_TYPE_KMC:
2722 case S390_FEAT_TYPE_KMO:
2723 case S390_FEAT_TYPE_KM:
2724 if (r1 & 1 || !r1) {
2725 gen_program_exception(s, PGM_SPECIFICATION);
2726 return DISAS_NORETURN;
2727 }
2728 /* FALL THROUGH */
2729 case S390_FEAT_TYPE_KMAC:
2730 case S390_FEAT_TYPE_KIMD:
2731 case S390_FEAT_TYPE_KLMD:
2732 if (r2 & 1 || !r2) {
2733 gen_program_exception(s, PGM_SPECIFICATION);
2734 return DISAS_NORETURN;
2735 }
2736 /* FALL THROUGH */
2737 case S390_FEAT_TYPE_PCKMO:
2738 case S390_FEAT_TYPE_PCC:
2739 break;
2740 default:
2741 g_assert_not_reached();
2742     }
2743
2744 t_r1 = tcg_const_i32(r1);
2745 t_r2 = tcg_const_i32(r2);
2746 t_r3 = tcg_const_i32(r3);
2747 type = tcg_const_i32(s->insn->data);
2748 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2749 set_cc_static(s);
2750 tcg_temp_free_i32(t_r1);
2751 tcg_temp_free_i32(t_r2);
2752 tcg_temp_free_i32(t_r3);
2753 tcg_temp_free_i32(type);
2754 return DISAS_NEXT;
2755 }
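/*
 * The fall-through cascade accumulates the register constraints: KMCTR
 * checks r3, r2 and r1; the KM-style functions skip the r3 check; the
 * KMAC-style functions check only r2; PCKMO and PCC check nothing.  Each
 * check is the same "nonzero even register pair" rule (sketch):
 *
 *     static bool msa_reg_ok(int r)
 *     {
 *         return r != 0 && (r & 1) == 0;
 *     }
 */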
2756
2757 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2758 {
2759 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2760 set_cc_static(s);
2761 return DISAS_NEXT;
2762 }
2763
2764 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2765 {
2766 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2767 set_cc_static(s);
2768 return DISAS_NEXT;
2769 }
2770
2771 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2772 {
2773 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2774 set_cc_static(s);
2775 return DISAS_NEXT;
2776 }
2777
2778 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2779 {
2780     /* The real output is indeed the original value in memory;
2781        the atomic fetch-add returns the pre-update value. */
2782 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2783 s->insn->data | MO_ALIGN);
2784 /* However, we need to recompute the addition for setting CC. */
2785 tcg_gen_add_i64(o->out, o->in1, o->in2);
2786 return DISAS_NEXT;
2787 }
2788
2789 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2790 {
2791     /* The real output is indeed the original value in memory;
2792        the atomic fetch-and returns the pre-update value. */
2793 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2794 s->insn->data | MO_ALIGN);
2795 /* However, we need to recompute the operation for setting CC. */
2796 tcg_gen_and_i64(o->out, o->in1, o->in2);
2797 return DISAS_NEXT;
2798 }
2799
2800 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2801 {
2802     /* The real output is indeed the original value in memory;
2803        the atomic fetch-or returns the pre-update value. */
2804 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2805 s->insn->data | MO_ALIGN);
2806 /* However, we need to recompute the operation for setting CC. */
2807 tcg_gen_or_i64(o->out, o->in1, o->in2);
2808 return DISAS_NEXT;
2809 }
2810
2811 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2812 {
2813     /* The real output is indeed the original value in memory;
2814        the atomic fetch-xor returns the pre-update value. */
2815 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2816 s->insn->data | MO_ALIGN);
2817 /* However, we need to recompute the operation for setting CC. */
2818 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2819 return DISAS_NEXT;
2820 }
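/*
 * All four LOAD AND <op> handlers share this shape, sketched here for
 * the add case with atomicity elided: R1 receives the old memory value,
 * while CC is derived from the freshly computed result.
 *
 *     static uint64_t laa_model(uint64_t *mem, uint64_t src, uint64_t *r1)
 *     {
 *         uint64_t old = *mem;
 *
 *         *mem = old + src;       // and/or/xor for the siblings
 *         *r1 = old;
 *         return old + src;       // value feeding the CC computation
 *     }
 */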
2821
2822 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2823 {
2824 gen_helper_ldeb(o->out, cpu_env, o->in2);
2825 return DISAS_NEXT;
2826 }
2827
2828 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2829 {
2830 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2831
2832 if (!m34) {
2833 return DISAS_NORETURN;
2834 }
2835 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2836 tcg_temp_free_i32(m34);
2837 return DISAS_NEXT;
2838 }
2839
2840 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2841 {
2842 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2843
2844 if (!m34) {
2845 return DISAS_NORETURN;
2846 }
2847 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2848 tcg_temp_free_i32(m34);
2849 return DISAS_NEXT;
2850 }
2851
2852 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2853 {
2854 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2855
2856 if (!m34) {
2857 return DISAS_NORETURN;
2858 }
2859 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2860 tcg_temp_free_i32(m34);
2861 return DISAS_NEXT;
2862 }
2863
2864 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2865 {
2866 gen_helper_lxdb(o->out, cpu_env, o->in2);
2867 return_low128(o->out2);
2868 return DISAS_NEXT;
2869 }
2870
2871 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2872 {
2873 gen_helper_lxeb(o->out, cpu_env, o->in2);
2874 return_low128(o->out2);
2875 return DISAS_NEXT;
2876 }
2877
2878 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2879 {
2880 tcg_gen_shli_i64(o->out, o->in2, 32);
2881 return DISAS_NEXT;
2882 }
2883
2884 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2885 {
2886 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2887 return DISAS_NEXT;
2888 }
2889
2890 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2891 {
2892 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2893 return DISAS_NEXT;
2894 }
2895
2896 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2897 {
2898 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2899 return DISAS_NEXT;
2900 }
2901
2902 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2903 {
2904 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2905 return DISAS_NEXT;
2906 }
2907
2908 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2909 {
2910 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2911 return DISAS_NEXT;
2912 }
2913
2914 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2915 {
2916 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2917 return DISAS_NEXT;
2918 }
2919
2920 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2921 {
2922 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2923 return DISAS_NEXT;
2924 }
2925
2926 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2927 {
2928 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2929 return DISAS_NEXT;
2930 }
2931
2932 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2933 {
2934 TCGLabel *lab = gen_new_label();
2935 store_reg32_i64(get_field(s, r1), o->in2);
2936 /* The value is stored even in case of trap. */
2937 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2938 gen_trap(s);
2939 gen_set_label(lab);
2940 return DISAS_NEXT;
2941 }
2942
2943 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2944 {
2945 TCGLabel *lab = gen_new_label();
2946 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2947 /* The value is stored even in case of trap. */
2948 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2949 gen_trap(s);
2950 gen_set_label(lab);
2951 return DISAS_NEXT;
2952 }
2953
2954 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2955 {
2956 TCGLabel *lab = gen_new_label();
2957 store_reg32h_i64(get_field(s, r1), o->in2);
2958 /* The value is stored even in case of trap. */
2959 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2960 gen_trap(s);
2961 gen_set_label(lab);
2962 return DISAS_NEXT;
2963 }
2964
2965 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2966 {
2967 TCGLabel *lab = gen_new_label();
2968 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2969 /* The value is stored even in case of trap. */
2970 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2971 gen_trap(s);
2972 gen_set_label(lab);
2973 return DISAS_NEXT;
2974 }
2975
2976 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2977 {
2978 TCGLabel *lab = gen_new_label();
2979 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2980 /* The value is stored even in case of trap. */
2981 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2982 gen_trap(s);
2983 gen_set_label(lab);
2984 return DISAS_NEXT;
2985 }
2986
2987 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2988 {
2989 DisasCompare c;
2990
2991 disas_jcc(s, &c, get_field(s, m3));
2992
2993 if (c.is_64) {
2994 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2995 o->in2, o->in1);
2996 free_compare(&c);
2997 } else {
2998 TCGv_i32 t32 = tcg_temp_new_i32();
2999 TCGv_i64 t, z;
3000
3001 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3002 free_compare(&c);
3003
3004 t = tcg_temp_new_i64();
3005 tcg_gen_extu_i32_i64(t, t32);
3006 tcg_temp_free_i32(t32);
3007
3008 z = tcg_const_i64(0);
3009 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3010 tcg_temp_free_i64(t);
3011 tcg_temp_free_i64(z);
3012 }
3013
3014 return DISAS_NEXT;
3015 }
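/*
 * Both movcond paths implement the same selection; a false condition
 * must preserve the original R1, which is why in1 is wired to the old
 * register value.  Sketch:
 *
 *     static uint64_t loc_model(bool cond, uint64_t old_r1, uint64_t src)
 *     {
 *         return cond ? src : old_r1;
 *     }
 */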
3016
3017 #ifndef CONFIG_USER_ONLY
3018 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3019 {
3020 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3021 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3022 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3023 tcg_temp_free_i32(r1);
3024 tcg_temp_free_i32(r3);
3025 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3026 return DISAS_PC_STALE_NOCHAIN;
3027 }
3028
3029 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3030 {
3031 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3032 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3033 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3034 tcg_temp_free_i32(r1);
3035 tcg_temp_free_i32(r3);
3036 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3037 return DISAS_PC_STALE_NOCHAIN;
3038 }
3039
3040 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3041 {
3042 gen_helper_lra(o->out, cpu_env, o->in2);
3043 set_cc_static(s);
3044 return DISAS_NEXT;
3045 }
3046
3047 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3048 {
3049 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3050 return DISAS_NEXT;
3051 }
3052
3053 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3054 {
3055 TCGv_i64 t1, t2;
3056
3057 per_breaking_event(s);
3058
3059 t1 = tcg_temp_new_i64();
3060 t2 = tcg_temp_new_i64();
3061 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3062 MO_TEUL | MO_ALIGN_8);
3063 tcg_gen_addi_i64(o->in2, o->in2, 4);
3064 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3065 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3066 tcg_gen_shli_i64(t1, t1, 32);
3067 gen_helper_load_psw(cpu_env, t1, t2);
3068 tcg_temp_free_i64(t1);
3069 tcg_temp_free_i64(t2);
3070 return DISAS_NORETURN;
3071 }
3072
3073 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3074 {
3075 TCGv_i64 t1, t2;
3076
3077 per_breaking_event(s);
3078
3079 t1 = tcg_temp_new_i64();
3080 t2 = tcg_temp_new_i64();
3081 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3082 MO_TEQ | MO_ALIGN_8);
3083 tcg_gen_addi_i64(o->in2, o->in2, 8);
3084 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3085 gen_helper_load_psw(cpu_env, t1, t2);
3086 tcg_temp_free_i64(t1);
3087 tcg_temp_free_i64(t2);
3088 return DISAS_NORETURN;
3089 }
3090 #endif
3091
3092 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3093 {
3094 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3095 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3096 gen_helper_lam(cpu_env, r1, o->in2, r3);
3097 tcg_temp_free_i32(r1);
3098 tcg_temp_free_i32(r3);
3099 return DISAS_NEXT;
3100 }
3101
3102 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3103 {
3104 int r1 = get_field(s, r1);
3105 int r3 = get_field(s, r3);
3106 TCGv_i64 t1, t2;
3107
3108 /* Only one register to read. */
3109 t1 = tcg_temp_new_i64();
3110 if (unlikely(r1 == r3)) {
3111 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3112 store_reg32_i64(r1, t1);
3113 tcg_temp_free(t1);
3114 return DISAS_NEXT;
3115 }
3116
3117 /* First load the values of the first and last registers to trigger
3118 possible page faults. */
3119 t2 = tcg_temp_new_i64();
3120 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3121 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3122 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3123 store_reg32_i64(r1, t1);
3124 store_reg32_i64(r3, t2);
3125
3126 /* Only two registers to read. */
3127 if (((r1 + 1) & 15) == r3) {
3128 tcg_temp_free(t2);
3129 tcg_temp_free(t1);
3130 return DISAS_NEXT;
3131 }
3132
3133 /* Then load the remaining registers. Page fault can't occur. */
3134 r3 = (r3 - 1) & 15;
3135 tcg_gen_movi_i64(t2, 4);
3136 while (r1 != r3) {
3137 r1 = (r1 + 1) & 15;
3138 tcg_gen_add_i64(o->in2, o->in2, t2);
3139 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3140 store_reg32_i64(r1, t1);
3141 }
3142 tcg_temp_free(t2);
3143 tcg_temp_free(t1);
3144
3145 return DISAS_NEXT;
3146 }
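/*
 * Register ranges wrap modulo 16, so LMH and LMG below reuse the same
 * structure.  Loading the first and last slots up front means any page
 * fault arrives before architectural state is partially updated.  The
 * register count, as a sketch:
 *
 *     static int lm_count(int r1, int r3)
 *     {
 *         return ((r3 - r1) & 15) + 1;   // e.g. r1=14, r3=1 -> 4 regs
 *     }
 */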
3147
3148 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3149 {
3150 int r1 = get_field(s, r1);
3151 int r3 = get_field(s, r3);
3152 TCGv_i64 t1, t2;
3153
3154 /* Only one register to read. */
3155 t1 = tcg_temp_new_i64();
3156 if (unlikely(r1 == r3)) {
3157 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3158 store_reg32h_i64(r1, t1);
3159 tcg_temp_free(t1);
3160 return DISAS_NEXT;
3161 }
3162
3163 /* First load the values of the first and last registers to trigger
3164 possible page faults. */
3165 t2 = tcg_temp_new_i64();
3166 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3167 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3168 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3169 store_reg32h_i64(r1, t1);
3170 store_reg32h_i64(r3, t2);
3171
3172 /* Only two registers to read. */
3173 if (((r1 + 1) & 15) == r3) {
3174 tcg_temp_free(t2);
3175 tcg_temp_free(t1);
3176 return DISAS_NEXT;
3177 }
3178
3179 /* Then load the remaining registers. Page fault can't occur. */
3180 r3 = (r3 - 1) & 15;
3181 tcg_gen_movi_i64(t2, 4);
3182 while (r1 != r3) {
3183 r1 = (r1 + 1) & 15;
3184 tcg_gen_add_i64(o->in2, o->in2, t2);
3185 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3186 store_reg32h_i64(r1, t1);
3187 }
3188 tcg_temp_free(t2);
3189 tcg_temp_free(t1);
3190
3191 return DISAS_NEXT;
3192 }
3193
3194 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3195 {
3196 int r1 = get_field(s, r1);
3197 int r3 = get_field(s, r3);
3198 TCGv_i64 t1, t2;
3199
3200 /* Only one register to read. */
3201 if (unlikely(r1 == r3)) {
3202 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3203 return DISAS_NEXT;
3204 }
3205
3206 /* First load the values of the first and last registers to trigger
3207 possible page faults. */
3208 t1 = tcg_temp_new_i64();
3209 t2 = tcg_temp_new_i64();
3210 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3211 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3212 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3213 tcg_gen_mov_i64(regs[r1], t1);
3214 tcg_temp_free(t2);
3215
3216 /* Only two registers to read. */
3217 if (((r1 + 1) & 15) == r3) {
3218 tcg_temp_free(t1);
3219 return DISAS_NEXT;
3220 }
3221
3222 /* Then load the remaining registers. Page fault can't occur. */
3223 r3 = (r3 - 1) & 15;
3224 tcg_gen_movi_i64(t1, 8);
3225 while (r1 != r3) {
3226 r1 = (r1 + 1) & 15;
3227 tcg_gen_add_i64(o->in2, o->in2, t1);
3228 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3229 }
3230 tcg_temp_free(t1);
3231
3232 return DISAS_NEXT;
3233 }
3234
3235 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3236 {
3237 TCGv_i64 a1, a2;
3238 MemOp mop = s->insn->data;
3239
3240 /* In a parallel context, stop the world and single step. */
3241 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3242 update_psw_addr(s);
3243 update_cc_op(s);
3244 gen_exception(EXCP_ATOMIC);
3245 return DISAS_NORETURN;
3246 }
3247
3248 /* In a serial context, perform the two loads ... */
3249 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3250 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3251 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3252 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3253 tcg_temp_free_i64(a1);
3254 tcg_temp_free_i64(a2);