ahci: factor out FIS decomposition from handle_cmd
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
37
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
40
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46
47
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
52
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
60 };
61
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
73
74 #define DISAS_EXCP 4
75
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
80
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
82 {
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
86 }
87 }
88 return pc;
89 }
90
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
93 {
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
97
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
104 }
105
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
112 }
113 }
114
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
121 }
122 }
123
124 #ifndef CONFIG_USER_ONLY
125 for (i = 0; i < 16; i++) {
126 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
127 if ((i % 4) == 3) {
128 cpu_fprintf(f, "\n");
129 } else {
130 cpu_fprintf(f, " ");
131 }
132 }
133 #endif
134
135 #ifdef DEBUG_INLINE_BRANCHES
136 for (i = 0; i < CC_OP_MAX; i++) {
137 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
138 inline_branch_miss[i], inline_branch_hit[i]);
139 }
140 #endif
141
142 cpu_fprintf(f, "\n");
143 }
144
/* TCG globals mirroring the guest PSW.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* TCG globals backing the (possibly deferred) condition code state.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Register name storage for r0-r15 and f0-f15; 4 bytes fits "r15\0".  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-opcode cc_op snapshot, used when restoring state mid-TB.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
158
159 void s390x_translate_init(void)
160 {
161 int i;
162
163 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
164 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
165 offsetof(CPUS390XState, psw.addr),
166 "psw_addr");
167 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
168 offsetof(CPUS390XState, psw.mask),
169 "psw_mask");
170
171 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
172 "cc_op");
173 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
174 "cc_src");
175 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
176 "cc_dst");
177 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
178 "cc_vr");
179
180 for (i = 0; i < 16; i++) {
181 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
182 regs[i] = tcg_global_mem_new(TCG_AREG0,
183 offsetof(CPUS390XState, regs[i]),
184 cpu_reg_names[i]);
185 }
186
187 for (i = 0; i < 16; i++) {
188 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
189 fregs[i] = tcg_global_mem_new(TCG_AREG0,
190 offsetof(CPUS390XState, fregs[i].d),
191 cpu_reg_names[i + 16]);
192 }
193 }
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Store V into general register REG (all 64 bits).  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into floating point register REG (all 64 bits).  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the low 32 bits of V into the low half of register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the high half of register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a 32-bit float value (low half of V) into register REG; the
   short-float format lives in the high half of the 64-bit FPR.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Write the current translation PC back to the guest PSW address.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Spill the translation-time cc_op to env, unless it is already there
   (CC_OP_STATIC) or only known at runtime (CC_OP_DYNAMIC).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Synchronize psw.addr and cc state before an operation that may fault,
   so that the exception handler sees a consistent CPU state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch a 2-byte instruction halfword at guest address PC.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at guest address PC.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
268
/* Map the PSW address-space-control field to the MMU index used for
   guest memory accesses: 0 = primary, 1 = secondary, 2 = home.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* The ASC bits admit no other encodings reachable here.  */
        tcg_abort();
        break;
    }
}
283
/* Emit a call raising exception EXCP via the exception helper.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
290
/* Raise a program exception CODE, recording the exception code and
   instruction length in env and advancing the PSW past the insn.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Record the instruction length for the ILC in the old PSW.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
314
/* Raise a specification exception for an illegal/unsupported opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Queue a privileged-operation exception if we are in problem state.
   Translation of the current insn continues after the call.  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
326
327 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
328 {
329 TCGv_i64 tmp = tcg_temp_new_i64();
330 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
331
332 /* Note that d2 is limited to 20 bits, signed. If we crop negative
333 displacements early we create larger immedate addends. */
334
335 /* Note that addi optimizes the imm==0 case. */
336 if (b2 && x2) {
337 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
338 tcg_gen_addi_i64(tmp, tmp, d2);
339 } else if (b2) {
340 tcg_gen_addi_i64(tmp, regs[b2], d2);
341 } else if (x2) {
342 tcg_gen_addi_i64(tmp, regs[x2], d2);
343 } else {
344 if (need_31) {
345 d2 &= 0x7fffffff;
346 need_31 = false;
347 }
348 tcg_gen_movi_i64(tmp, d2);
349 }
350 if (need_31) {
351 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
352 }
353
354 return tmp;
355 }
356
357 static inline bool live_cc_data(DisasContext *s)
358 {
359 return (s->cc_op != CC_OP_DYNAMIC
360 && s->cc_op != CC_OP_STATIC
361 && s->cc_op > 3);
362 }
363
/* Set the condition code to the constant VAL (0-3), discarding any
   now-dead inputs of a previously deferred cc computation.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer a one-operand cc computation: record OP and its operand DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a two-operand cc computation: record OP, SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a three-operand cc computation: record OP, SRC, DST and VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
403
/* Set cc to nonzero-test semantics on the 64-bit value VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* Set cc from a 32-bit float result VAL.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* Set cc from a 64-bit float result VAL.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* Set cc from a 128-bit float result (high half VH, low half VL).  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
434
/* Calculate the condition code into the cc_op global as a constant 0-3,
   calling the calc_cc helper with however many of cc_src/cc_dst/cc_vr
   the deferred operation needs.  Afterwards s->cc_op is CC_OP_STATIC.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

    /* First pass: allocate the constants the helper call will need.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; no constants to allocate.  */
        break;
    }

    /* Second pass: materialize the cc value.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
542
543 static int use_goto_tb(DisasContext *s, uint64_t dest)
544 {
545 /* NOTE: we handle the case where the TB spans two pages here */
546 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
547 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
548 && !s->singlestep_enabled
549 && !(s->tb->cflags & CF_LAST_IO));
550 }
551
/* Count a branch that had to fall back to the generic calc_cc path.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch whose condition was inlined as a TCG comparison.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
565
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because mask bit 0 (CC=3) is don't-care.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
591
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Works in two phases:
   first map (cc_op, mask) to a TCG condition, falling back to the
   generic calc_cc path if no inline form exists; then load the
   comparison operands.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Always/never taken: compare cc_op with itself.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already materialized in cc_op; derive a
           comparison against it directly from the mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
882
883 static void free_compare(DisasCompare *c)
884 {
885 if (!c->g1) {
886 if (c->is_64) {
887 tcg_temp_free_i64(c->u.s64.a);
888 } else {
889 tcg_temp_free_i32(c->u.s32.a);
890 }
891 }
892 if (!c->g2) {
893 if (c->is_64) {
894 tcg_temp_free_i64(c->u.s64.b);
895 } else {
896 tcg_temp_free_i32(c->u.s32.b);
897 }
898 }
899 }
900
/* ====================================================================== */
/* Define the insn format enumeration.  Each F<n> line in
   insn-format.def expands to one FMT_<name> enumerator.  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
920
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact indexes: fields that never coexist share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, if any */
    unsigned presentC:16;   /* bitmap of valid compact-index fields */
    unsigned int presentO;  /* bitmap of valid original-index fields */
    int c[NUM_C_FIELD];     /* decoded field values, compact-indexed */
};
988
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
992
993 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
994 {
995 return (f->presentO >> c) & 1;
996 }
997
/* Fetch the decoded value of a field; it must have been decoded
   (checked against the original-index availability bitmap).  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1004
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width of the field in bits */
    unsigned int type:2;        /* extraction type; per the macros below:
                                   0 = raw, 1 = immediate, 2 = long disp */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1032
/* Build the per-format field-layout table from insn-format.def.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1058
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* true if the operand is a global */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1097
/* Architecture facility an instruction belongs to; used to gate
   decoding on the emulated CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1120
/* One decode-table entry: opcode, format, required facility, operand
   constraints, and the pipeline of helpers that implement the insn.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (primary + secondary bytes) */
    DisasFormat fmt:8;      /* instruction format */
    DisasFacility fac:8;    /* facility required for this insn */
    unsigned spec:8;        /* SPEC_* operand constraint bits */

    const char *name;

    /* Helper pipeline: load inputs, prepare output, do the operation,
       write outputs, then compute the condition code.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* opaque per-insn datum for the helpers */
};
1138
1139 /* ====================================================================== */
1140 /* Miscellaneous helpers, used by several operations. */
1141
/* Compute a shift-count operand into o->in2: the immediate displacement
   when no base register is given, otherwise base+displacement address
   arithmetic; in both cases masked to the valid count bits (MASK).  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* Constant count: mask at translation time.  */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1155
/* Emit an unconditional direct branch to DEST, chaining translation
   blocks with goto_tb when permitted by use_goto_tb().  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branch to the immediately following insn: fall through.  */
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1172
/* Emit a conditional branch described by C.  IS_IMM selects between a
   relative target (IMM, in halfwords from the insn address) and an
   indirect target CDEST.  C is consumed (freed) on all paths.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the setcond result and select with
               a 64-bit movcond against zero.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1297
1298 /* ====================================================================== */
1299 /* The operations. These perform the bulk of the work for any insn,
1300 usually after the operands have been loaded and output initialized. */
1301
/* LOAD POSITIVE (64-bit integer): absolute value via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (extended BFP): clear the sign bit in the high
   doubleword, copy the low doubleword unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD: out = in1 + in2.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1332
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + carry, where the carry is
   recovered from the current (logical-add) condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the setcond result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1359
/* ADD (short BFP).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands in out/out2 and in1/in2; the
   low half of the result is retrieved via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* AND IMMEDIATE (NIHH et al): and a 16/32-bit immediate into one field
   of the register.  insn->data packs the field's bit position (low
   byte) and width (high bits).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;   /* bit offset of the field */
    int size = s->insn->data >> 8;      /* width of the field in bits */
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Move the immediate into position and force all bits outside the
       field to 1 so they pass through the AND unchanged.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1401
/* BRANCH AND SAVE (register form): save the link information, then
   branch to in2 unless the target register was r0 (in2 unused).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* BRANCH RELATIVE AND SAVE: save the link information and branch to
   pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: branch (relative via i2, or indirect via in2)
   when the CC matches mask m1.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1429
/* BRANCH ON COUNT (32-bit): decrement the low half of r1 and branch
   if the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, write back only the low 32.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1453
/* BRANCH ON COUNT (64-bit): decrement r1 and branch if non-zero.
   The register itself is compared, hence g1 = true (global, not to
   be freed by free_compare).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1472
/* BRANCH ON INDEX (32-bit, BXH/BXLE): r1 += r3, then branch on a
   signed comparison of the 32-bit sum against the odd register of the
   r3 pair.  insn->data selects the sense (non-zero -> LE, else GT).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Compute the sum in 64 bits, compare the truncated halves, and
       write back only the low 32 bits of r1.  */
    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1498
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): r1 += r3, then branch on a
   signed comparison against the odd register of the r3 pair.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 is the comparand register itself, snapshot its value
       before the addition below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1524
/* COMPARE AND BRANCH family: compare in1 with in2 per mask m3 and
   branch relative (i4) or to the b4/d4 address.  insn->data non-zero
   selects the unsigned (logical) comparison variants.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* The operands are globals owned by the caller; don't free them.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1550
/* COMPARE (short BFP): CC produced by the helper.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): 128-bit operands in out/out2 and in1/in2.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1571
/* CONVERT TO FIXED (32-bit from short BFP); m3 is the rounding mode.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (32-bit from long BFP).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (32-bit from extended BFP).  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (64-bit from short BFP).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (64-bit from long BFP).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (64-bit from extended BFP).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (32-bit from short BFP).  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (32-bit from long BFP).  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (32-bit from extended BFP).  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (64-bit from short BFP).  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (64-bit from long BFP).  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (64-bit from extended BFP).  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1679
/* CONVERT FROM FIXED (short BFP from 64-bit); m3 is the rounding mode.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (long BFP from 64-bit).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (extended BFP from 64-bit); 128-bit result in
   out/out2.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (short BFP from 64-bit).  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (long BFP from 64-bit).  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (extended BFP from 64-bit).  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1729
/* CHECKSUM: helper computes the checksum and the number of bytes
   processed; the r2/r2+1 address/length pair is then advanced by that
   amount after all possible exceptions have been raised.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1746
/* COMPARE LOGICAL (storage-storage): inline the 1/2/4/8-byte cases as
   two loads plus an unsigned compare; fall back to the helper for any
   other length.  Note l1 holds length minus one.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: byte-wise comparison in the helper.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1780
/* COMPARE LOGICAL LONG EXTENDED: entirely in the helper; CC set there.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare the register bytes
   selected by m3 against successive storage bytes.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* COMPARE LOGICAL STRING: helper returns the updated first-operand
   address; the updated second-operand address comes back via the
   low-128 mechanism.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1814
1815 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1816 {
1817 TCGv_i64 t = tcg_temp_new_i64();
1818 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1819 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1820 tcg_gen_or_i64(o->out, o->out, t);
1821 tcg_temp_free_i64(t);
1822 return NO_EXIT;
1823 }
1824
/* COMPARE AND SWAP (32/64-bit per insn->data): compare the expected
   value (in2) with storage; on equality store the new value (in1),
   otherwise re-store the original contents.  CC <- inequality.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1874
/* COMPARE DOUBLE AND SWAP (64-bit pair): 128-bit compare-and-swap on
   the register pairs r1:r1+1 (expected) and r3:r3+1 (new value).  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* As in op_cs: store unconditionally, writing back the original
       contents when the comparison failed.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1926
1927 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): done in the helper, which
   also handles the TLB purge; CC set there.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1937 #endif
1938
/* CONVERT TO DECIMAL: convert the 32-bit value in in1 via helper and
   store the 8-byte result at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1950
/* COMPARE AND TRAP: if the m3 condition holds for in1 vs in2, set the
   data-exception code and raise a data program exception; otherwise
   fall through.  insn->data selects the unsigned variants.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    /* Branch around the trap when the condition does NOT hold.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1977
1978 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): hypervisor call, dispatched on the function
   code in the low 12 bits of the displacement.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1992 #endif
1993
/* DIVIDE (signed 32-bit): quotient via low128, remainder in out2.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (32-bit).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE SINGLE (signed 64-bit).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (64-bit): 128-bit dividend in out/out2.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2021
/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): 128-bit result in out/out2.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2040
/* EXTRACT ACCESS: copy access register r2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: copy the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2053
/* EXECUTE: run the target instruction, modified by the register byte,
   entirely inside a helper.  PSW address and CC state must be synced
   first since the helper may fault or recurse into translation.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2079
/* FIND LEFTMOST ONE: R1 = count of leading zeros (64 if input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2099
2100 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2101 {
2102 int m3 = get_field(s->fields, m3);
2103 int pos, len, base = s->insn->data;
2104 TCGv_i64 tmp = tcg_temp_new_i64();
2105 uint64_t ccm;
2106
2107 switch (m3) {
2108 case 0xf:
2109 /* Effectively a 32-bit load. */
2110 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2111 len = 32;
2112 goto one_insert;
2113
2114 case 0xc:
2115 case 0x6:
2116 case 0x3:
2117 /* Effectively a 16-bit load. */
2118 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2119 len = 16;
2120 goto one_insert;
2121
2122 case 0x8:
2123 case 0x4:
2124 case 0x2:
2125 case 0x1:
2126 /* Effectively an 8-bit load. */
2127 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2128 len = 8;
2129 goto one_insert;
2130
2131 one_insert:
2132 pos = base + ctz32(m3) * 8;
2133 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2134 ccm = ((1ull << len) - 1) << pos;
2135 break;
2136
2137 default:
2138 /* This is going to be a sequence of loads and inserts. */
2139 pos = base + 32 - 8;
2140 ccm = 0;
2141 while (m3) {
2142 if (m3 & 0x8) {
2143 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2144 tcg_gen_addi_i64(o->in2, o->in2, 1);
2145 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2146 ccm |= 0xff << pos;
2147 }
2148 m3 = (m3 << 1) & 0xf;
2149 pos -= 8;
2150 }
2151 break;
2152 }
2153
2154 tcg_gen_movi_i64(tmp, ccm);
2155 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2156 tcg_temp_free_i64(tmp);
2157 return NO_EXIT;
2158 }
2159
/* INSERT IMMEDIATE (IIHH et al): deposit in2 into the field of in1
   described by insn->data (low byte = position, high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2167
/* INSERT PROGRAM MASK: assemble the CC (from cc_op) and the program
   mask (from psw_mask) into bits 32-39 of the output register,
   leaving the remaining bits of the low word untouched.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    /* Materialize the CC before reading it below.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from psw_mask into position.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge in the condition code.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2186
2187 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged).  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2201 #endif
2202
/* LOAD LENGTHENED (long BFP from short).  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (short BFP from long).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (long BFP from extended).  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (short BFP from extended).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED (extended BFP from long); result in out/out2.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED (extended BFP from short); result in out/out2.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2246
/* Memory loads of various widths; in2 holds the address.  */

/* Load 8 bits, sign-extended.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 8 bits, zero-extended.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 16 bits, sign-extended.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 16 bits, zero-extended.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 32 bits, sign-extended.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 32 bits, zero-extended.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 64 bits.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2288
/* LOAD ON CONDITION: out = (CC matches m3) ? in2 : in1, implemented
   without branches via movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit compare: widen the setcond result, then select with
           a 64-bit movcond against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2318
2319 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; helper loads control registers
   r1..r3 from storage at o->in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit form), as op_lctl.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; helper performs the translation and
   also produces the condition code.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2351
2352 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2353 {
2354 TCGv_i64 t1, t2;
2355
2356 check_privileged(s);
2357
2358 t1 = tcg_temp_new_i64();
2359 t2 = tcg_temp_new_i64();
2360 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2361 tcg_gen_addi_i64(o->in2, o->in2, 4);
2362 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2363 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2364 tcg_gen_shli_i64(t1, t1, 32);
2365 gen_helper_load_psw(cpu_env, t1, t2);
2366 tcg_temp_free_i64(t1);
2367 tcg_temp_free_i64(t2);
2368 return EXIT_NORETURN;
2369 }
2370
2371 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2372 {
2373 TCGv_i64 t1, t2;
2374
2375 check_privileged(s);
2376
2377 t1 = tcg_temp_new_i64();
2378 t2 = tcg_temp_new_i64();
2379 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2380 tcg_gen_addi_i64(o->in2, o->in2, 8);
2381 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2382 gen_helper_load_psw(cpu_env, t1, t2);
2383 tcg_temp_free_i64(t1);
2384 tcg_temp_free_i64(t2);
2385 return EXIT_NORETURN;
2386 }
2387 #endif
2388
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from
   storage at o->in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2399
2400 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2401 {
2402 int r1 = get_field(s->fields, r1);
2403 int r3 = get_field(s->fields, r3);
2404 TCGv_i64 t = tcg_temp_new_i64();
2405 TCGv_i64 t4 = tcg_const_i64(4);
2406
2407 while (1) {
2408 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2409 store_reg32_i64(r1, t);
2410 if (r1 == r3) {
2411 break;
2412 }
2413 tcg_gen_add_i64(o->in2, o->in2, t4);
2414 r1 = (r1 + 1) & 15;
2415 }
2416
2417 tcg_temp_free_i64(t);
2418 tcg_temp_free_i64(t4);
2419 return NO_EXIT;
2420 }
2421
/* LOAD MULTIPLE HIGH: as op_lm32, but each word is stored into the
   high half of the register (store_reg32h_i64).  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address and wrap the register number mod 16.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2443
/* LOAD MULTIPLE (64-bit): load full registers r1..r3 (wrapping mod 16)
   from successive doublewords; loads go directly into regs[].  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2462
/* Register move by ownership transfer: hand o->in2 (and its "global"
   flag) over to o->out instead of emitting a copy.  in2 is cleared so
   the generic cleanup code does not free/store it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* As op_mov2, but transferring a 128-bit pair (in1/in2 -> out/out2).  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2483
/* MOVE (character): helper copies l1+1 bytes from o->in2 to o->addr1.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper operates on the r1/r2 register pairs and produces
   the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl, with r1/r3 pairs and o->in2.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2516
2517 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move via helper; note the
   length/key comes from regs[] indexed by the l1 field.  CC from helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of op_mvcp in the other direction.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2537 #endif
2538
/* MOVE PAGE: helper call taking the r0 register as the function/key
   operand; condition code from the helper.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: helper returns the updated first-operand address in
   o->in1 and the updated second-operand address via return_low128.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2555
/* MULTIPLY: plain 64-bit product of the two inputs.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (128-bit): unsigned widening multiply; high half in o->out,
   low half in o->out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
2567
/* BFP multiplies, implemented by out-of-line helpers.  The mx* forms
   have a 128-bit result/operand: high in o->out, low via return_low128.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2599
/* BFP multiply-and-add/subtract.  The third operand comes from float
   register r3 (short forms widen it via load_freg32_i64).  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2629
/* LOAD NEGATIVE (integer): helper computes -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (BFP): force the sign bit on — bit 31 for the 32-bit
   value, bit 63 for the 64-bit value.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit form: the sign lives in the high doubleword (in1); the low
   doubleword passes through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2654
/* AND (character): storage-to-storage AND of l1+1 bytes via helper,
   which also produces the condition code.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2664
/* LOAD COMPLEMENT (integer): two's complement negate.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (BFP): flip the sign bit — bit 31 for the 32-bit
   value, bit 63 for the 64-bit value.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit form: flip the sign in the high doubleword, copy the low.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2689
/* OR (character): storage-to-storage OR of l1+1 bytes via helper,
   which also produces the condition code.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2699
/* OR: plain 64-bit bitwise or.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE (OIHH et al): the insn data encodes, per opcode, the bit
   position (low byte) and width (next byte) of the field the immediate
   in o->in2 is OR-ed into.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is consumed destructively, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2721
/* POPULATION COUNT, via out-of-line helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2727
2728 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; helper flushes the translation buffer.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2735 #endif
2736
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate o->in2
   left by i5, then insert the bits selected by the i3..i4 range into
   o->out, either keeping or zeroing (i4 bit 0x80) the unselected bits.
   The H/L variants restrict the operation to one 32-bit half.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2819
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG, selected
   by op2): rotate o->in2 left by i5, combine the bits selected by the
   i3..i4 range into o->out, and set the CC from the selected bits.
   i3 bit 0x80 requests the test-only form (result discarded).  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2872
/* Byte-reverse loads/moves: swap the bytes of a 16-, 32- or 64-bit
   value in o->in2 into o->out.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2890
2891 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2892 {
2893 TCGv_i32 t1 = tcg_temp_new_i32();
2894 TCGv_i32 t2 = tcg_temp_new_i32();
2895 TCGv_i32 to = tcg_temp_new_i32();
2896 tcg_gen_trunc_i64_i32(t1, o->in1);
2897 tcg_gen_trunc_i64_i32(t2, o->in2);
2898 tcg_gen_rotl_i32(to, t1, t2);
2899 tcg_gen_extu_i32_i64(o->out, to);
2900 tcg_temp_free_i32(t1);
2901 tcg_temp_free_i32(t2);
2902 tcg_temp_free_i32(to);
2903 return NO_EXIT;
2904 }
2905
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2911
2912 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; condition code from helper.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged helper call.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2928
/* SET ADDRESSING MODE: insn->data carries the 2-bit mode, deposited
   into the PSW mask; the block must end since the mode changed.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam = tcg_const_i64(sam);

    /* Overwrite PSW_MASK_64 and PSW_MASK_32 */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    tcg_temp_free_i64(tsam);
    return EXIT_PC_STALE;
}
2940 #endif
2941
/* SET ACCESS REGISTER: store the low 32 bits of o->in2 into aregs[r1].  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2948
/* BFP subtract and square-root, implemented by out-of-line helpers.
   The extended (x) forms return a 128-bit result: high half in o->out,
   low half via return_low128.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2986
2987 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged; condition code from helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged helper; note cc_op is written by the
   helper but set_cc_static is not called here.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3006 #endif
3007
3008 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3009 {
3010 DisasCompare c;
3011 TCGv_i64 a;
3012 int lab, r1;
3013
3014 disas_jcc(s, &c, get_field(s->fields, m3));
3015
3016 lab = gen_new_label();
3017 if (c.is_64) {
3018 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3019 } else {
3020 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3021 }
3022 free_compare(&c);
3023
3024 r1 = get_field(s->fields, r1);
3025 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3026 if (s->insn->data) {
3027 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3028 } else {
3029 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3030 }
3031 tcg_temp_free_i64(a);
3032
3033 gen_set_label(lab);
3034 return NO_EXIT;
3035 }
3036
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), which selects both the CC computation and the sign mask.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3050
/* Logical/arithmetic shifts of in1 by in2 into out.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3068
/* SET FPC: install a new floating-point control register via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL, via helper.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3080
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT, selected by op2): deposit the
   mode bits from the b2/d2 operand into the proper FPC field, then
   re-install the FPC so fpu_status picks up the new rounding mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the displacement itself is the mode.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3120
3121 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; move bits 56-59 of the operand
   address into the PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged helper call.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace the top byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3143
/* STORE CPU ADDRESS: privileged; reads cpu_num from the CPU state.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3153
/* STORE CLOCK: helper produces the TOD value; CC forced to 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store the clock as a 16-byte value.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3183
/* SET CLOCK COMPARATOR: privileged helper call.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged; value produced by helper.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3197
/* STORE CONTROL (64-bit): privileged; helper stores control registers
   r1..r3 to storage at o->in2.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3209
3210 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3211 {
3212 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3213 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3214 check_privileged(s);
3215 potential_page_fault(s);
3216 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3217 tcg_temp_free_i32(r1);
3218 tcg_temp_free_i32(r3);