cpu: Move breakpoints field from CPU_COMMON to CPUState
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 };
59
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 S390CPU *cpu = S390_CPU(cs);
93 CPUS390XState *env = &cpu->env;
94 int i;
95
96 if (env->cc_op > 3) {
97 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
98 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
99 } else {
100 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
101 env->psw.mask, env->psw.addr, env->cc_op);
102 }
103
104 for (i = 0; i < 16; i++) {
105 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
106 if ((i % 4) == 3) {
107 cpu_fprintf(f, "\n");
108 } else {
109 cpu_fprintf(f, " ");
110 }
111 }
112
113 for (i = 0; i < 16; i++) {
114 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
115 if ((i % 4) == 3) {
116 cpu_fprintf(f, "\n");
117 } else {
118 cpu_fprintf(f, " ");
119 }
120 }
121
122 #ifndef CONFIG_USER_ONLY
123 for (i = 0; i < 16; i++) {
124 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
125 if ((i % 4) == 3) {
126 cpu_fprintf(f, "\n");
127 } else {
128 cpu_fprintf(f, " ");
129 }
130 }
131 #endif
132
133 #ifdef DEBUG_INLINE_BRANCHES
134 for (i = 0; i < CC_OP_MAX; i++) {
135 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
136 inline_branch_miss[i], inline_branch_hit[i]);
137 }
138 #endif
139
140 cpu_fprintf(f, "\n");
141 }
142
/* TCG globals mirroring architected state; allocated in
   s390x_translate_init(). */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Lazy condition-code state: the operation and up to three operands. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Register names ("r0".."r15" followed by "f0".."f15"). */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-opcode cc_op values recorded at translation time; presumably
   consulted when restoring state from a TB — not visible in this file
   chunk, confirm against the gen_intermediate_code machinery. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
156
/* Allocate the TCG globals declared above, binding each to its slot in
   CPUS390XState.  Called once during target initialization. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The 16 general registers, named "r0".."r15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* The 16 floating point registers, named "f0".."f15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }
}
192
/* Return a fresh temporary holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding the 32-bit float value of FPR REG
   (stored in the high half of the 64-bit register), shifted down. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Store V into general register REG (full 64 bits). */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into floating point register REG (full 64 bits). */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the low 32 bits of V into general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of general register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V as the 32-bit float value of FPR REG
   (i.e. into the high half); the low half is preserved. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
237
/* Synchronize the psw.addr TCG global with the translation-time PC. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Materialize the current translation-time cc_op into the cc_op global.
   DYNAMIC/STATIC mean the global is already up to date. */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Before an operation that may fault, make psw.addr and cc_op in env
   consistent so the exception path sees correct state. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
256
/* Fetch a 2-byte instruction word from guest code at PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word from guest code at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction from guest code at PC, packed into the low
   48 bits: the first halfword ends up in bits 32..47. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
271
272 static int get_mem_index(DisasContext *s)
273 {
274 switch (s->tb->flags & FLAG_MASK_ASC) {
275 case PSW_ASC_PRIMARY >> 32:
276 return 0;
277 case PSW_ASC_SECONDARY >> 32:
278 return 1;
279 case PSW_ASC_HOME >> 32:
280 return 2;
281 default:
282 tcg_abort();
283 break;
284 }
285 }
286
/* Emit a call raising exception EXCP via the exception helper. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise a program exception with the given interruption CODE.  Records
   the code and instruction length in env, advances the PSW past the
   instruction, flushes cc state, then raises EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember which program exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, needed for the PSW stored by the interruption. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}

/* Raise a specification exception for an illegal/invalid opcode. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation exception when executing in problem state
   (PSW problem-state bit cached in tb->flags). */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
329
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a new temporary.  Register number 0 means "no base"/"no index".
   Outside 64-bit mode the result is truncated to 31 bits. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends. */

    /* Note that addi optimizes the imm==0 case. */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* Constant address: apply the 31-bit mask at translation time
           and skip the runtime AND below. */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
359
360 static inline bool live_cc_data(DisasContext *s)
361 {
362 return (s->cc_op != CC_OP_DYNAMIC
363 && s->cc_op != CC_OP_STATIC
364 && s->cc_op > 3);
365 }
366
/* Set the translation-time cc to the constant VAL (0..3), discarding any
   pending lazy cc operands so TCG can drop dead computations. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a one-operand lazy cc operation: cc_dst := dst, cc_op := op. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a two-operand lazy cc operation. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a three-operand lazy cc operation (src, dst and result vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Set cc from a 64-bit value using the NZ (zero / not-zero) rule. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* Set cc from a 32-bit float result (zero/negative/positive/NaN). */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* Set cc from a 64-bit float result. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* Set cc from a 128-bit float result given its two halves. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
437
/* Calculate the condition code into the cc_op global.  The first switch
   allocates the helper arguments needed for this cc_op class; the second
   emits the actual computation.  Afterwards the translation-time state
   is CC_OP_STATIC (cc value lives in env->cc_op). */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    switch (s->cc_op) {
    default:
        /* Ops taking fewer than 3 arguments need a dummy placeholder. */
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* All non-constant, non-static ops pass the cc_op as a constant. */
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; nothing to allocate. */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
545
/* Decide whether a direct chained jump (goto_tb) to DEST is permitted:
   the destination must lie on one of the (up to two) pages of this TB,
   and neither single-stepping nor CF_LAST_IO may be in effect. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
554
/* Statistics hooks, active only with DEBUG_INLINE_BRANCHES: count how
   often a branch condition could not be inlined... */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* ...and how often it could. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because the CC=3 bit is a don't-care. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
594
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Two phases: first map
   (cc_op, mask) to a TCG condition, falling back to materializing the cc
   value when no inline mapping exists; then load the operands to compare.
   C->g1/C->g2 mark operands that alias TCG globals and must not be freed
   by free_compare(). */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial masks: branch always or never; no operands needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: cc==0 means all selected bits zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* INSERT CHARACTERS UNDER MASK: cc==0 means all bytes zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (src AND dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in the cc_op global; decode the mask
           into a comparison against it. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
885
886 static void free_compare(DisasCompare *c)
887 {
888 if (!c->g1) {
889 if (c->is_64) {
890 tcg_temp_free_i64(c->u.s64.a);
891 } else {
892 tcg_temp_free_i32(c->u.s32.a);
893 }
894 }
895 if (!c->g2) {
896 if (c->is_64) {
897 tcg_temp_free_i64(c->u.s64.b);
898 } else {
899 tcg_temp_free_i32(c->u.s32.b);
900 }
901 }
902 }
903
/* ====================================================================== */
/* Define the insn format enumeration.  Each FMT_xxx enumerator is
   generated from insn-format.def; the Fn macros discard the field
   descriptions here (they are used again below for format_info). */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
923
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* Original (non-overlapping) field indices; used as bit positions in
   DisasFields.presentO. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact field indices: mutually exclusive fields share a slot. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* Decoded instruction fields for one instruction. */
struct DisasFields {
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, if any */
    unsigned presentC:16;   /* bitmap of occupied compact slots */
    unsigned int presentO;  /* bitmap of present original fields */
    int c[NUM_C_FIELD];     /* field values, indexed compactly */
};
991
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original field C was decoded for this instruction. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch field value; asserts the field is actually present. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1007
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* 0=unsigned, 1=signed, 2=long-disp */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field index */
} DisasField;

/* Per-format list of fields to extract (unused entries zero). */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field constructors used by insn-format.def: register, mask,
   base+displacement (short and long forms, with/without index),
   immediate, and length fields. */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* One entry per format, in DisasFormat enumeration order. */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1061
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flags: the corresponding TCGv aliases a global and must not
       be freed by the generic cleanup code. */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

#define SPEC_r1_even 1
#define SPEC_r2_even 2
#define SPEC_r3_even 4
#define SPEC_r1_f128 8
#define SPEC_r2_f128 16

/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility an instruction belongs to; used to gate
   availability of each DisasInsn entry. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* Decode-table entry: opcode, format, required facility, operand
   specification constraints, and the set of helper callbacks that
   together implement the instruction. */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1141
1142 /* ====================================================================== */
1143 /* Miscellaneous helpers, used by several operations. */
1144
/* Load the shift amount for a shift instruction into o->in2:
   base register b2 plus displacement d2, reduced modulo MASK+1
   (MASK is 31 or 63 depending on operand width). */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* Constant shift amount; fold the mask at translation time. */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1158
/* Emit an unconditional branch to the literal address DEST.  A branch to
   the fall-through address is a no-op; otherwise use a chained goto_tb
   when permitted, else a plain PC update. */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1175
/* Emit a conditional branch.  C describes the condition and is consumed
   (freed via free_compare at egress).  If IS_IMM the destination is
   pc + 2*IMM, otherwise it is the runtime value CDEST.  Chooses among:
   no-op, direct goto, dual goto_tb, mixed, or fully dynamic exit.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken: PSW address already holds the target for the
               register form; set it now for the immediate form.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit comparison: materialize the 0/1 result, widen it,
               and select on it being non-zero.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1300
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
1304
/* Absolute value of a 64-bit integer, via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Absolute value of a 32-bit float image: clear bit 31.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Absolute value of a 64-bit float image: clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Absolute value of a 128-bit float image: clear the sign bit in the
   high doubleword, pass the low doubleword through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* Integer addition: out = in1 + in2.  CC computed by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1335
/* Add with carry-in: out = in1 + in2 + (msb of the current CC).  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the 0/1 result, then widen it.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1362
/* 32-bit BFP add, performed in the aeb helper (which also handles
   rounding and FP exceptions).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP add, via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP add: the helper takes both halves of each operand and
   returns the high half; the low half comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* AND an immediate against one slice of the register.  insn->data
   encodes the slice: low byte = bit shift, high byte = bit size.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate at the target slice and force all bits
       outside the slice to 1, so the AND leaves them untouched.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1404
/* Branch and save: store the link information in OUT, then branch to
   the address in in2 — unless in2 is unused (r0), i.e. no branch.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch relative and save: link, then direct branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* Branch on condition: evaluate CC mask M1, branch either to the
   relative immediate i2 or to the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1432
/* Branch on count, 32-bit: decrement the low word of R1 and branch
   if the decremented value is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, store back only the low 32 bits.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on count, 64-bit: decrement R1 in place and branch if the
   result is non-zero.  R1 itself is the (global) comparison operand.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on index, 32-bit: R1 += R3, compare the low word of R1
   against the low word of R3|1.  insn->data selects <= vs > (BXLE
   vs BXH style — the two table entries share this implementation).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on index, 64-bit: R1 += R3, compare R1 against R3|1.  When
   R1 is the same register as the comparand, copy the old value first
   so the comparison uses the pre-update contents.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1527
/* Compare and branch: compare in1 against in2 using the less/greater
   mask M3 (unsigned when insn->data is set), then branch either to the
   relative immediate i4 or to the address b4+d4.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* Both comparands are caller-owned (globals from our viewpoint).  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Register/memory form: the branch target address goes in OUT.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1553
/* 32-bit BFP compare: helper returns the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 64-bit BFP compare.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 128-bit BFP compare: both halves of each operand are passed.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1574
/* The following twelve ops convert BFP values to fixed-point integers.
   Naming: cf* -> 32-bit signed, cg* -> 64-bit signed, clf*/clg* ->
   unsigned; *eb/*db/*xb denote 32/64/128-bit float sources.  M3 is the
   rounding mode passed to the helper; the CC is derived from the
   (possibly modified) source value afterwards.  */

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1682
/* The following six ops convert fixed-point integers to BFP values.
   Naming: *gb -> 64-bit signed source, *lgb -> 64-bit unsigned source;
   ce/cd/cx denote 32/64/128-bit float results.  M3 is the rounding
   mode; no CC is set by these conversions.  */

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 128-bit result: low half of the float comes back via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1732
/* Checksum: the helper accumulates over the buffer described by the
   R2 register pair (address in in2, length in regs[r2+1]) and returns
   the number of bytes consumed in LEN, which is then used to advance
   the address and shrink the length.  The accumulator comes back via
   out / return_low128.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1749
/* Compare logical (memory vs memory).  L1 is the length minus one;
   power-of-two sizes are inlined as two loads plus an unsigned compare,
   everything else goes through the byte-wise helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Odd length: let the helper walk the bytes.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1783
/* Compare logical long extended: fully handled in the helper, which
   operates on the R1 and R3 register pairs and returns the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare logical under mask: compare the register bytes selected by
   M3 (low 32 bits of in1) against successive bytes at in2.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Compare logical string: helper scans until the terminator in regs[0];
   updated addresses come back in in1 and (via low128) in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1817
/* Copy sign: out = magnitude of in2 combined with the sign bit of in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1827
/* Compare and swap (32- or 64-bit, selected by insn->data).  Emitted as
   load / compare / conditional store; the store is issued
   unconditionally (writing back either the new value or the original
   memory contents), which is architecturally permitted.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1877
/* Compare double and swap (128-bit): compare the doubleword pair at
   B2+D2 against R1:R1+1 and, on equality, store R3:R3+1.  Like op_cs,
   the stores are issued unconditionally, re-storing the original
   contents on inequality.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Select the values to store back: new on equality, old otherwise.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1929
#ifndef CONFIG_USER_ONLY
/* Compare and swap and purge: privileged; performed in the helper,
   which returns the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
1941
/* Convert to decimal: the helper converts the low 32 bits of in1 and
   the 64-bit result is stored to the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1953
/* Compare and trap: if the comparison selected by M3 holds (unsigned
   when insn->data is set), record DXC 0xff in the FPC and raise a data
   program exception; otherwise fall through.  The brcond uses the
   inverted condition to skip the trap path.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap.  */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1980
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypervisor call; the function code is the low
   12 bits of D2 and the helper operates on regs 1 and 2.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
1996
/* Integer division ops: the helper returns the remainder in out2 and
   the quotient via return_low128 into out; division by zero is
   detected inside the helper.  */

/* 32-bit signed divide.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64-bit unsigned divide: the dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit BFP divide.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP divide.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP divide: low half of the result via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2043
/* Extract access register R2 into the output (zero-extended).  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* Extract the floating-point control register (zero-extended).  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2056
/* EXECUTE: run the single target instruction with its second byte
   modified by in1, entirely in the helper.  PSW address and cc_op must
   be synced first since the helper may fault or change control flow.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2082
/* Find leftmost one: out = position of the first set bit (64 if none),
   out2 = input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2102
2103 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2104 {
2105 int m3 = get_field(s->fields, m3);
2106 int pos, len, base = s->insn->data;
2107 TCGv_i64 tmp = tcg_temp_new_i64();
2108 uint64_t ccm;
2109
2110 switch (m3) {
2111 case 0xf:
2112 /* Effectively a 32-bit load. */
2113 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2114 len = 32;
2115 goto one_insert;
2116
2117 case 0xc:
2118 case 0x6:
2119 case 0x3:
2120 /* Effectively a 16-bit load. */
2121 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2122 len = 16;
2123 goto one_insert;
2124
2125 case 0x8:
2126 case 0x4:
2127 case 0x2:
2128 case 0x1:
2129 /* Effectively an 8-bit load. */
2130 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2131 len = 8;
2132 goto one_insert;
2133
2134 one_insert:
2135 pos = base + ctz32(m3) * 8;
2136 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2137 ccm = ((1ull << len) - 1) << pos;
2138 break;
2139
2140 default:
2141 /* This is going to be a sequence of loads and inserts. */
2142 pos = base + 32 - 8;
2143 ccm = 0;
2144 while (m3) {
2145 if (m3 & 0x8) {
2146 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2147 tcg_gen_addi_i64(o->in2, o->in2, 1);
2148 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2149 ccm |= 0xff << pos;
2150 }
2151 m3 = (m3 << 1) & 0xf;
2152 pos -= 8;
2153 }
2154 break;
2155 }
2156
2157 tcg_gen_movi_i64(tmp, ccm);
2158 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2159 tcg_temp_free_i64(tmp);
2160 return NO_EXIT;
2161 }
2162
/* Insert immediate into one slice of the register.  insn->data encodes
   the slice: low byte = bit shift, high byte = bit size.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2170
/* Insert program mask: build bits 24-31 of the output word from the
   PSW program-mask bits and the (fully computed) condition code,
   leaving the other bits of out unchanged.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program-mask bits from psw_mask into position.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the 2-bit CC at bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2189
#ifndef CONFIG_USER_ONLY
/* Invalidate page table entry: privileged, done in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Insert storage key extended: privileged, done in the helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2205
/* BFP format conversions, all performed in helpers (which handle
   rounding and exceptions): load lengthened (ld*/lx*) and load rounded
   (le*, ld* from extended).  */

/* 32-bit -> 64-bit float.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* 64-bit -> 32-bit float (rounded).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* 128-bit -> 64-bit float (rounded); both source halves passed.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit -> 32-bit float (rounded).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit -> 128-bit float; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 32-bit -> 128-bit float; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2243
/* Load 31-bit: keep only the low 31 bits of the source.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths and extensions; the address is in
   in2 and the (sign- or zero-extended) value lands in out.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2291
/* Load on condition: out = in2 when the CC condition selected by M3
   holds, else in1 (the previous register contents).  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the 0/1 result, widen it, and
           select on it being non-zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2321
2322 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit form): privileged.  The helper loads control
   registers r1..r3 from memory at in2; the access may fault. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit form): as op_lctl, via the lctlg helper. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged.  The helper translates the virtual
   address in in2; the CC is taken from the helper's cc_op result. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2354
/* LOAD PSW (short, 8-byte form): privileged.  Fetch two 32-bit words
   (mask, address) from in2, widen the mask and install the new PSW.
   Installing a PSW ends the translation block unconditionally. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2373
/* LOAD PSW EXTENDED (16-byte form): privileged.  Fetch the 64-bit
   mask and 64-bit address from in2 and install the new PSW; ends
   the translation block unconditionally. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2390 #endif
2391
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from
   memory at in2; the access may fault. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2402
2403 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2404 {
2405 int r1 = get_field(s->fields, r1);
2406 int r3 = get_field(s->fields, r3);
2407 TCGv_i64 t = tcg_temp_new_i64();
2408 TCGv_i64 t4 = tcg_const_i64(4);
2409
2410 while (1) {
2411 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2412 store_reg32_i64(r1, t);
2413 if (r1 == r3) {
2414 break;
2415 }
2416 tcg_gen_add_i64(o->in2, o->in2, t4);
2417 r1 = (r1 + 1) & 15;
2418 }
2419
2420 tcg_temp_free_i64(t);
2421 tcg_temp_free_i64(t4);
2422 return NO_EXIT;
2423 }
2424
2425 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2426 {
2427 int r1 = get_field(s->fields, r1);
2428 int r3 = get_field(s->fields, r3);
2429 TCGv_i64 t = tcg_temp_new_i64();
2430 TCGv_i64 t4 = tcg_const_i64(4);
2431
2432 while (1) {
2433 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2434 store_reg32h_i64(r1, t);
2435 if (r1 == r3) {
2436 break;
2437 }
2438 tcg_gen_add_i64(o->in2, o->in2, t4);
2439 r1 = (r1 + 1) & 15;
2440 }
2441
2442 tcg_temp_free_i64(t);
2443 tcg_temp_free_i64(t4);
2444 return NO_EXIT;
2445 }
2446
/* Load multiple 64-bit: fill registers r1..r3 (wrapping modulo 16)
   directly from successive doublewords at the address in in2. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2465
/* Move in2 into out by stealing the TCG temp: out takes ownership of
   the temp (and its "global" flag), and in2 is marked unused so the
   generic post-insn cleanup does not free it a second time. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* As op_mov2, but for a 128-bit pair: steal both in1 and in2 into
   out/out2, transferring the global flags alongside. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2486
/* MOVE (character): helper copies a byte range (length from the l1
   field) from in2 to addr1; the accesses may fault. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper operates on the r1/r2 register pairs and yields
   the condition code. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl, with the extra in2 operand. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2519
2520 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged; helper moves between address spaces
   with the key/length taken from regs[l1], setting the CC. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: the mirror of op_mvcp, via the mvcs helper. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2540 #endif
2541
/* MOVE PAGE: helper copies a page from in2 to in1 with flags taken
   from the implicit r0 register; CC from the helper. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: helper copies until the terminator in the implicit r0;
   it returns the updated destination in in1 and the updated source via
   the low-128 side channel into in2.  CC from the helper. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2558
/* Integer multiply: low 64 bits of in1 * in2. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Full unsigned 128-bit product: low half in out2, high half in out
   (tcg mulu2 argument order is rl, rh). */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiplies, all via helpers (rounding/exceptions live there). */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP multiply: result high half in out, low half fetched
   with return_low128. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Fused multiply-add (short): the r3 float32 operand is loaded into a
   temporary i64 and freed after the helper call. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* Fused multiply-add (long): r3 is a full FP register, used directly. */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* Fused multiply-subtract, short and long, mirroring maeb/madb. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2632
/* LOAD NEGATIVE (integer): negative of the absolute value, via helper. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* FP load negative: force the sign bit on (float32 sign is bit 31). */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit FP: the sign bit lives in the high half (in1); the low half
   passes through unchanged. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2657
/* AND (character): helper ANDs two storage operands (length from l1)
   in place at addr1; CC from the helper. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2667
/* Integer two's-complement negation. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* FP negation: just flip the sign bit (float32 sign is bit 31). */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit FP: sign bit in the high half (in1); low half unchanged. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2692
/* OR (character): helper ORs two storage operands (length from l1)
   in place at addr1; CC from the helper. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2702
/* Bitwise OR of the two register/immediate operands. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2708
/* OR immediate into a sub-field of the register: insn->data encodes
   the field's bit offset (low byte) and width (high bits).  The
   immediate in in2 is shifted into position, ORed in, and the CC is
   computed from the affected bits only. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2724
/* POPULATION COUNT, via helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2730
2731 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; the helper does the flush. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2738 #endif
2739
/* ROTATE THEN INSERT SELECTED BITS (risbg / risbhg / risblg):
   rotate R2 left by I5, then replace the bits of R1 selected by the
   I3..I4 range.  Bit 0x80 of I4 requests zeroing of the unselected
   bits instead of preserving them.  The high/low variants restrict
   the operation to one 32-bit half via PMASK. */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask the rotated source, mask the kept bits
           of the destination, and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2822
/* ROTATE THEN AND/OR/XOR SELECTED BITS (rnsbg / rosbg / rxsbg):
   rotate R2 left by I5, combine the bits selected by I3..I4 into R1
   with the boolean op chosen by op2, and set the CC from the selected
   bits.  Bit 0x80 of I3 selects the test-only form: the result is
   discarded (written to a scratch temp) but the CC is still set. */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* For AND, bits outside the mask must not clear R1 bits: force
           them to 1 in the source before ANDing. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2875
/* Byte-reversal (load/store reversed) at each operand width. */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2893
2894 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2895 {
2896 TCGv_i32 t1 = tcg_temp_new_i32();
2897 TCGv_i32 t2 = tcg_temp_new_i32();
2898 TCGv_i32 to = tcg_temp_new_i32();
2899 tcg_gen_trunc_i64_i32(t1, o->in1);
2900 tcg_gen_trunc_i64_i32(t2, o->in2);
2901 tcg_gen_rotl_i32(to, t1, t2);
2902 tcg_gen_extu_i32_i64(o->out, to);
2903 tcg_temp_free_i32(t1);
2904 tcg_temp_free_i32(t2);
2905 tcg_temp_free_i32(to);
2906 return NO_EXIT;
2907 }
2908
/* 64-bit rotate left. */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2914
2915 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper does the storage
   key update and yields the CC. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST: privileged. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2931 #endif
2932
/* SET ACCESS: store the low 32 bits of in2 into access register r1. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2939
/* BFP subtract (short/long), via helpers. */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP subtract: high half in out, low half via return_low128. */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root at each width, via helpers. */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2977
2978 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper performs the call and
   yields the CC.  May fault on the SCCB access. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; helper performs the order and yields
   the CC.  NOTE(review): set_cc_static is not called here, unlike the
   other cc_op-writing helpers — confirm the CC handling is intended. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2997 #endif
2998
2999 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3000 {
3001 DisasCompare c;
3002 TCGv_i64 a;
3003 int lab, r1;
3004
3005 disas_jcc(s, &c, get_field(s->fields, m3));
3006
3007 lab = gen_new_label();
3008 if (c.is_64) {
3009 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3010 } else {
3011 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3012 }
3013 free_compare(&c);
3014
3015 r1 = get_field(s->fields, r1);
3016 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3017 if (s->insn->data) {
3018 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3019 } else {
3020 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3021 }
3022 tcg_temp_free_i64(a);
3023
3024 gen_set_label(lab);
3025 return NO_EXIT;
3026 }
3027
/* SHIFT LEFT ARITHMETIC (32- or 64-bit, per insn->data = sign-bit
   position).  The CC is computed by helper from the pre-shift inputs. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit. Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3041
/* Logical/arithmetic shifts; the CC (where required) is handled by
   the surrounding insn description, not here. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3059
/* SET FPC: install in2 as the floating-point control register. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL, via helper. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3071
/* SET ROUNDING MODE (SRNM / SRNMB / SRNMT): compute the new mode bits
   from the b2/d2 address and deposit them into the proper field of
   the FPC, then reinstall the FPC so fpu_status picks up the mode. */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Each variant targets a different bit-field of the FPC. */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* No base register: the mode is a compile-time constant. */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3111
3112 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit bits of in2 into the
   key field of the PSW mask. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged, via helper. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace the top 8 bits of the PSW
   mask with in2. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3134
/* STORE CPU ADDRESS: privileged; reads cpu_num out of the env. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number. In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3144
/* STORE CLOCK: helper reads the TOD clock; CC forced to 0. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store the clock as a 16-byte value. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value. Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3174
/* SET CLOCK COMPARATOR: privileged, via helper. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged, via helper. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3188
/* STORE CONTROL (64-bit form): privileged; helper stores control
   registers r1..r3 to memory at in2.  May fault. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit form): as op_stctg, via the stctl helper. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3212
3213 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3214 {
3215 check_privileged(s);
3216 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));