4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env
;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 struct TranslationBlock
*tb
;
58 const DisasInsn
*insn
;
62 bool singlestep_enabled
;
65 /* Information carried about a condition to be evaluated. */
72 struct { TCGv_i64 a
, b
; } s64
;
73 struct { TCGv_i32 a
, b
; } s32
;
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit
[CC_OP_MAX
];
81 static uint64_t inline_branch_miss
[CC_OP_MAX
];
84 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
86 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
87 if (s
->tb
->flags
& FLAG_MASK_32
) {
88 return pc
| 0x80000000;
94 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
97 S390CPU
*cpu
= S390_CPU(cs
);
98 CPUS390XState
*env
= &cpu
->env
;
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
121 cpu_fprintf(f
, "\n");
127 for (i
= 0; i
< 32; i
++) {
128 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
129 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
130 cpu_fprintf(f
, (i
% 2) ?
"\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i
= 0; i
< 16; i
++) {
135 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
137 cpu_fprintf(f
, "\n");
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i
= 0; i
< CC_OP_MAX
; i
++) {
146 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
147 inline_branch_miss
[i
], inline_branch_hit
[i
]);
151 cpu_fprintf(f
, "\n");
154 static TCGv_i64 psw_addr
;
155 static TCGv_i64 psw_mask
;
156 static TCGv_i64 gbea
;
158 static TCGv_i32 cc_op
;
159 static TCGv_i64 cc_src
;
160 static TCGv_i64 cc_dst
;
161 static TCGv_i64 cc_vr
;
163 static char cpu_reg_names
[32][4];
164 static TCGv_i64 regs
[16];
165 static TCGv_i64 fregs
[16];
167 void s390x_translate_init(void)
171 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
172 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
173 offsetof(CPUS390XState
, psw
.addr
),
175 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
176 offsetof(CPUS390XState
, psw
.mask
),
178 gbea
= tcg_global_mem_new_i64(cpu_env
,
179 offsetof(CPUS390XState
, gbea
),
182 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
184 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
186 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
188 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
193 regs
[i
] = tcg_global_mem_new(cpu_env
,
194 offsetof(CPUS390XState
, regs
[i
]),
198 for (i
= 0; i
< 16; i
++) {
199 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
200 fregs
[i
] = tcg_global_mem_new(cpu_env
,
201 offsetof(CPUS390XState
, vregs
[i
][0].d
),
202 cpu_reg_names
[i
+ 16]);
206 static TCGv_i64
load_reg(int reg
)
208 TCGv_i64 r
= tcg_temp_new_i64();
209 tcg_gen_mov_i64(r
, regs
[reg
]);
213 static TCGv_i64
load_freg32_i64(int reg
)
215 TCGv_i64 r
= tcg_temp_new_i64();
216 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
220 static void store_reg(int reg
, TCGv_i64 v
)
222 tcg_gen_mov_i64(regs
[reg
], v
);
225 static void store_freg(int reg
, TCGv_i64 v
)
227 tcg_gen_mov_i64(fregs
[reg
], v
);
230 static void store_reg32_i64(int reg
, TCGv_i64 v
)
232 /* 32 bit register writes keep the upper half */
233 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
236 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
238 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
241 static void store_freg32_i64(int reg
, TCGv_i64 v
)
243 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
246 static void return_low128(TCGv_i64 dest
)
248 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
251 static void update_psw_addr(DisasContext
*s
)
254 tcg_gen_movi_i64(psw_addr
, s
->pc
);
257 static void per_branch(DisasContext
*s
, bool to_next
)
259 #ifndef CONFIG_USER_ONLY
260 tcg_gen_movi_i64(gbea
, s
->pc
);
262 if (s
->tb
->flags
& FLAG_MASK_PER
) {
263 TCGv_i64 next_pc
= to_next ?
tcg_const_i64(s
->next_pc
) : psw_addr
;
264 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
266 tcg_temp_free_i64(next_pc
);
272 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
273 TCGv_i64 arg1
, TCGv_i64 arg2
)
275 #ifndef CONFIG_USER_ONLY
276 if (s
->tb
->flags
& FLAG_MASK_PER
) {
277 TCGLabel
*lab
= gen_new_label();
278 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
280 tcg_gen_movi_i64(gbea
, s
->pc
);
281 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
285 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
286 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
287 tcg_temp_free_i64(pc
);
292 static void per_breaking_event(DisasContext
*s
)
294 tcg_gen_movi_i64(gbea
, s
->pc
);
297 static void update_cc_op(DisasContext
*s
)
299 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
300 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
304 static void potential_page_fault(DisasContext
*s
)
310 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
312 return (uint64_t)cpu_lduw_code(env
, pc
);
315 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
317 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
320 static int get_mem_index(DisasContext
*s
)
322 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
323 case PSW_ASC_PRIMARY
>> 32:
325 case PSW_ASC_SECONDARY
>> 32:
327 case PSW_ASC_HOME
>> 32:
335 static void gen_exception(int excp
)
337 TCGv_i32 tmp
= tcg_const_i32(excp
);
338 gen_helper_exception(cpu_env
, tmp
);
339 tcg_temp_free_i32(tmp
);
342 static void gen_program_exception(DisasContext
*s
, int code
)
346 /* Remember what pgm exeption this was. */
347 tmp
= tcg_const_i32(code
);
348 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
349 tcg_temp_free_i32(tmp
);
351 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
352 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
353 tcg_temp_free_i32(tmp
);
355 /* Advance past instruction. */
362 /* Trigger exception. */
363 gen_exception(EXCP_PGM
);
366 static inline void gen_illegal_opcode(DisasContext
*s
)
368 gen_program_exception(s
, PGM_OPERATION
);
371 static inline void gen_trap(DisasContext
*s
)
375 /* Set DXC to 0xff. */
376 t
= tcg_temp_new_i32();
377 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
378 tcg_gen_ori_i32(t
, t
, 0xff00);
379 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
380 tcg_temp_free_i32(t
);
382 gen_program_exception(s
, PGM_DATA
);
385 #ifndef CONFIG_USER_ONLY
386 static void check_privileged(DisasContext
*s
)
388 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
389 gen_program_exception(s
, PGM_PRIVILEGED
);
394 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 TCGv_i64 tmp
= tcg_temp_new_i64();
397 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
399 /* Note that d2 is limited to 20 bits, signed. If we crop negative
400 displacements early we create larger immedate addends. */
402 /* Note that addi optimizes the imm==0 case. */
404 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
405 tcg_gen_addi_i64(tmp
, tmp
, d2
);
407 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
409 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
415 tcg_gen_movi_i64(tmp
, d2
);
418 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
424 static inline bool live_cc_data(DisasContext
*s
)
426 return (s
->cc_op
!= CC_OP_DYNAMIC
427 && s
->cc_op
!= CC_OP_STATIC
431 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
433 if (live_cc_data(s
)) {
434 tcg_gen_discard_i64(cc_src
);
435 tcg_gen_discard_i64(cc_dst
);
436 tcg_gen_discard_i64(cc_vr
);
438 s
->cc_op
= CC_OP_CONST0
+ val
;
441 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
443 if (live_cc_data(s
)) {
444 tcg_gen_discard_i64(cc_src
);
445 tcg_gen_discard_i64(cc_vr
);
447 tcg_gen_mov_i64(cc_dst
, dst
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 if (live_cc_data(s
)) {
455 tcg_gen_discard_i64(cc_vr
);
457 tcg_gen_mov_i64(cc_src
, src
);
458 tcg_gen_mov_i64(cc_dst
, dst
);
462 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
463 TCGv_i64 dst
, TCGv_i64 vr
)
465 tcg_gen_mov_i64(cc_src
, src
);
466 tcg_gen_mov_i64(cc_dst
, dst
);
467 tcg_gen_mov_i64(cc_vr
, vr
);
471 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
473 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
476 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
478 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
481 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
483 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
486 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
488 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
491 /* CC value is in env->cc_op */
492 static void set_cc_static(DisasContext
*s
)
494 if (live_cc_data(s
)) {
495 tcg_gen_discard_i64(cc_src
);
496 tcg_gen_discard_i64(cc_dst
);
497 tcg_gen_discard_i64(cc_vr
);
499 s
->cc_op
= CC_OP_STATIC
;
502 /* calculates cc into cc_op */
503 static void gen_op_calc_cc(DisasContext
*s
)
505 TCGv_i32 local_cc_op
;
508 TCGV_UNUSED_I32(local_cc_op
);
509 TCGV_UNUSED_I64(dummy
);
512 dummy
= tcg_const_i64(0);
526 local_cc_op
= tcg_const_i32(s
->cc_op
);
542 /* s->cc_op is the cc value */
543 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
546 /* env->cc_op already is the cc value */
561 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
566 case CC_OP_LTUGTU_32
:
567 case CC_OP_LTUGTU_64
:
574 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
589 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
592 /* unknown operation - assume 3 arguments and cc_op in env */
593 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
599 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
600 tcg_temp_free_i32(local_cc_op
);
602 if (!TCGV_IS_UNUSED_I64(dummy
)) {
603 tcg_temp_free_i64(dummy
);
606 /* We now have cc in cc_op as constant */
610 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
612 if (unlikely(s
->singlestep_enabled
) ||
613 (s
->tb
->cflags
& CF_LAST_IO
) ||
614 (s
->tb
->flags
& FLAG_MASK_PER
)) {
617 #ifndef CONFIG_USER_ONLY
618 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
619 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
625 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
627 #ifdef DEBUG_INLINE_BRANCHES
628 inline_branch_miss
[cc_op
]++;
632 static void account_inline_branch(DisasContext
*s
, int cc_op
)
634 #ifdef DEBUG_INLINE_BRANCHES
635 inline_branch_hit
[cc_op
]++;
639 /* Table of mask values to comparison codes, given a comparison as input.
640 For such, CC=3 should not be possible. */
641 static const TCGCond ltgt_cond
[16] = {
642 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
643 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
644 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
645 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
646 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
647 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
648 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
649 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
652 /* Table of mask values to comparison codes, given a logic op as input.
653 For such, only CC=0 and CC=1 should be possible. */
654 static const TCGCond nz_cond
[16] = {
655 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
656 TCG_COND_NEVER
, TCG_COND_NEVER
,
657 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
658 TCG_COND_NE
, TCG_COND_NE
,
659 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
660 TCG_COND_EQ
, TCG_COND_EQ
,
661 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
662 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
665 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
666 details required to generate a TCG comparison. */
667 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
670 enum cc_op old_cc_op
= s
->cc_op
;
672 if (mask
== 15 || mask
== 0) {
673 c
->cond
= (mask ? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
676 c
->g1
= c
->g2
= true;
681 /* Find the TCG condition for the mask + cc op. */
687 cond
= ltgt_cond
[mask
];
688 if (cond
== TCG_COND_NEVER
) {
691 account_inline_branch(s
, old_cc_op
);
694 case CC_OP_LTUGTU_32
:
695 case CC_OP_LTUGTU_64
:
696 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
697 if (cond
== TCG_COND_NEVER
) {
700 account_inline_branch(s
, old_cc_op
);
704 cond
= nz_cond
[mask
];
705 if (cond
== TCG_COND_NEVER
) {
708 account_inline_branch(s
, old_cc_op
);
723 account_inline_branch(s
, old_cc_op
);
738 account_inline_branch(s
, old_cc_op
);
742 switch (mask
& 0xa) {
743 case 8: /* src == 0 -> no one bit found */
746 case 2: /* src != 0 -> one bit found */
752 account_inline_branch(s
, old_cc_op
);
758 case 8 | 2: /* vr == 0 */
761 case 4 | 1: /* vr != 0 */
764 case 8 | 4: /* no carry -> vr >= src */
767 case 2 | 1: /* carry -> vr < src */
773 account_inline_branch(s
, old_cc_op
);
778 /* Note that CC=0 is impossible; treat it as dont-care. */
780 case 2: /* zero -> op1 == op2 */
783 case 4 | 1: /* !zero -> op1 != op2 */
786 case 4: /* borrow (!carry) -> op1 < op2 */
789 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
795 account_inline_branch(s
, old_cc_op
);
800 /* Calculate cc value. */
805 /* Jump based on CC. We'll load up the real cond below;
806 the assignment here merely avoids a compiler warning. */
807 account_noninline_branch(s
, old_cc_op
);
808 old_cc_op
= CC_OP_STATIC
;
809 cond
= TCG_COND_NEVER
;
813 /* Load up the arguments of the comparison. */
815 c
->g1
= c
->g2
= false;
819 c
->u
.s32
.a
= tcg_temp_new_i32();
820 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
821 c
->u
.s32
.b
= tcg_const_i32(0);
824 case CC_OP_LTUGTU_32
:
827 c
->u
.s32
.a
= tcg_temp_new_i32();
828 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
829 c
->u
.s32
.b
= tcg_temp_new_i32();
830 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
837 c
->u
.s64
.b
= tcg_const_i64(0);
841 case CC_OP_LTUGTU_64
:
845 c
->g1
= c
->g2
= true;
851 c
->u
.s64
.a
= tcg_temp_new_i64();
852 c
->u
.s64
.b
= tcg_const_i64(0);
853 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
858 c
->u
.s32
.a
= tcg_temp_new_i32();
859 c
->u
.s32
.b
= tcg_temp_new_i32();
860 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
861 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
862 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
864 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
871 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
872 c
->u
.s64
.b
= tcg_const_i64(0);
884 case 0x8 | 0x4 | 0x2: /* cc != 3 */
886 c
->u
.s32
.b
= tcg_const_i32(3);
888 case 0x8 | 0x4 | 0x1: /* cc != 2 */
890 c
->u
.s32
.b
= tcg_const_i32(2);
892 case 0x8 | 0x2 | 0x1: /* cc != 1 */
894 c
->u
.s32
.b
= tcg_const_i32(1);
896 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
899 c
->u
.s32
.a
= tcg_temp_new_i32();
900 c
->u
.s32
.b
= tcg_const_i32(0);
901 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
903 case 0x8 | 0x4: /* cc < 2 */
905 c
->u
.s32
.b
= tcg_const_i32(2);
907 case 0x8: /* cc == 0 */
909 c
->u
.s32
.b
= tcg_const_i32(0);
911 case 0x4 | 0x2 | 0x1: /* cc != 0 */
913 c
->u
.s32
.b
= tcg_const_i32(0);
915 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
918 c
->u
.s32
.a
= tcg_temp_new_i32();
919 c
->u
.s32
.b
= tcg_const_i32(0);
920 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
922 case 0x4: /* cc == 1 */
924 c
->u
.s32
.b
= tcg_const_i32(1);
926 case 0x2 | 0x1: /* cc > 1 */
928 c
->u
.s32
.b
= tcg_const_i32(1);
930 case 0x2: /* cc == 2 */
932 c
->u
.s32
.b
= tcg_const_i32(2);
934 case 0x1: /* cc == 3 */
936 c
->u
.s32
.b
= tcg_const_i32(3);
939 /* CC is masked by something else: (8 >> cc) & mask. */
942 c
->u
.s32
.a
= tcg_const_i32(8);
943 c
->u
.s32
.b
= tcg_const_i32(0);
944 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
945 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
956 static void free_compare(DisasCompare
*c
)
960 tcg_temp_free_i64(c
->u
.s64
.a
);
962 tcg_temp_free_i32(c
->u
.s32
.a
);
967 tcg_temp_free_i64(c
->u
.s64
.b
);
969 tcg_temp_free_i32(c
->u
.s32
.b
);
974 /* ====================================================================== */
975 /* Define the insn format enumeration. */
976 #define F0(N) FMT_##N,
977 #define F1(N, X1) F0(N)
978 #define F2(N, X1, X2) F0(N)
979 #define F3(N, X1, X2, X3) F0(N)
980 #define F4(N, X1, X2, X3, X4) F0(N)
981 #define F5(N, X1, X2, X3, X4, X5) F0(N)
984 #include "insn-format.def"
994 /* Define a structure to hold the decoded fields. We'll store each inside
995 an array indexed by an enum. In order to conserve memory, we'll arrange
996 for fields that do not exist at the same time to overlap, thus the "C"
997 for compact. For checking purposes there is an "O" for original index
998 as well that will be applied to availability bitmaps. */
1000 enum DisasFieldIndexO
{
1023 enum DisasFieldIndexC
{
1054 struct DisasFields
{
1058 unsigned presentC
:16;
1059 unsigned int presentO
;
1063 /* This is the way fields are to be accessed out of DisasFields. */
1064 #define have_field(S, F) have_field1((S), FLD_O_##F)
1065 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1067 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1069 return (f
->presentO
>> c
) & 1;
1072 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1073 enum DisasFieldIndexC c
)
1075 assert(have_field1(f
, o
));
1079 /* Describe the layout of each field in each format. */
1080 typedef struct DisasField
{
1082 unsigned int size
:8;
1083 unsigned int type
:2;
1084 unsigned int indexC
:6;
1085 enum DisasFieldIndexO indexO
:8;
1088 typedef struct DisasFormatInfo
{
1089 DisasField op
[NUM_C_FIELD
];
1092 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1093 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1094 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1095 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1096 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1097 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1098 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1099 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1100 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1101 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1102 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1103 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1104 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1105 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1107 #define F0(N) { { } },
1108 #define F1(N, X1) { { X1 } },
1109 #define F2(N, X1, X2) { { X1, X2 } },
1110 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1111 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1112 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1114 static const DisasFormatInfo format_info
[] = {
1115 #include "insn-format.def"
1133 /* Generally, we'll extract operands into this structures, operate upon
1134 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1135 of routines below for more details. */
1137 bool g_out
, g_out2
, g_in1
, g_in2
;
1138 TCGv_i64 out
, out2
, in1
, in2
;
1142 /* Instructions can place constraints on their operands, raising specification
1143 exceptions if they are violated. To make this easy to automate, each "in1",
1144 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1145 of the following, or 0. To make this easy to document, we'll put the
1146 SPEC_<name> defines next to <name>. */
1148 #define SPEC_r1_even 1
1149 #define SPEC_r2_even 2
1150 #define SPEC_r3_even 4
1151 #define SPEC_r1_f128 8
1152 #define SPEC_r2_f128 16
1154 /* Return values from translate_one, indicating the state of the TB. */
1156 /* Continue the TB. */
1158 /* We have emitted one or more goto_tb. No fixup required. */
1160 /* We are not using a goto_tb (for whatever reason), but have updated
1161 the PC (for whatever reason), so there's no need to do it again on
1164 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1165 updated the PC for the next instruction to be executed. */
1167 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1168 No following code will be executed. */
1172 typedef enum DisasFacility
{
1173 FAC_Z
, /* zarch (default) */
1174 FAC_CASS
, /* compare and swap and store */
1175 FAC_CASS2
, /* compare and swap and store 2*/
1176 FAC_DFP
, /* decimal floating point */
1177 FAC_DFPR
, /* decimal floating point rounding */
1178 FAC_DO
, /* distinct operands */
1179 FAC_EE
, /* execute extensions */
1180 FAC_EI
, /* extended immediate */
1181 FAC_FPE
, /* floating point extension */
1182 FAC_FPSSH
, /* floating point support sign handling */
1183 FAC_FPRGR
, /* FPR-GR transfer */
1184 FAC_GIE
, /* general instructions extension */
1185 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1186 FAC_HW
, /* high-word */
1187 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1188 FAC_MIE
, /* miscellaneous-instruction-extensions */
1189 FAC_LAT
, /* load-and-trap */
1190 FAC_LOC
, /* load/store on condition */
1191 FAC_LD
, /* long displacement */
1192 FAC_PC
, /* population count */
1193 FAC_SCF
, /* store clock fast */
1194 FAC_SFLE
, /* store facility list extended */
1195 FAC_ILA
, /* interlocked access facility 1 */
1201 DisasFacility fac
:8;
1206 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1207 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1208 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1209 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1210 void (*help_cout
)(DisasContext
*, DisasOps
*);
1211 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1216 /* ====================================================================== */
1217 /* Miscellaneous helpers, used by several operations. */
1219 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1220 DisasOps
*o
, int mask
)
1222 int b2
= get_field(f
, b2
);
1223 int d2
= get_field(f
, d2
);
1226 o
->in2
= tcg_const_i64(d2
& mask
);
1228 o
->in2
= get_address(s
, 0, b2
, d2
);
1229 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1233 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1235 if (dest
== s
->next_pc
) {
1236 per_branch(s
, true);
1239 if (use_goto_tb(s
, dest
)) {
1241 per_breaking_event(s
);
1243 tcg_gen_movi_i64(psw_addr
, dest
);
1244 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1245 return EXIT_GOTO_TB
;
1247 tcg_gen_movi_i64(psw_addr
, dest
);
1248 per_branch(s
, false);
1249 return EXIT_PC_UPDATED
;
1253 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1254 bool is_imm
, int imm
, TCGv_i64 cdest
)
1257 uint64_t dest
= s
->pc
+ 2 * imm
;
1260 /* Take care of the special cases first. */
1261 if (c
->cond
== TCG_COND_NEVER
) {
1266 if (dest
== s
->next_pc
) {
1267 /* Branch to next. */
1268 per_branch(s
, true);
1272 if (c
->cond
== TCG_COND_ALWAYS
) {
1273 ret
= help_goto_direct(s
, dest
);
1277 if (TCGV_IS_UNUSED_I64(cdest
)) {
1278 /* E.g. bcr %r0 -> no branch. */
1282 if (c
->cond
== TCG_COND_ALWAYS
) {
1283 tcg_gen_mov_i64(psw_addr
, cdest
);
1284 per_branch(s
, false);
1285 ret
= EXIT_PC_UPDATED
;
1290 if (use_goto_tb(s
, s
->next_pc
)) {
1291 if (is_imm
&& use_goto_tb(s
, dest
)) {
1292 /* Both exits can use goto_tb. */
1295 lab
= gen_new_label();
1297 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1299 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1302 /* Branch not taken. */
1304 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1305 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1309 per_breaking_event(s
);
1311 tcg_gen_movi_i64(psw_addr
, dest
);
1312 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1316 /* Fallthru can use goto_tb, but taken branch cannot. */
1317 /* Store taken branch destination before the brcond. This
1318 avoids having to allocate a new local temp to hold it.
1319 We'll overwrite this in the not taken case anyway. */
1321 tcg_gen_mov_i64(psw_addr
, cdest
);
1324 lab
= gen_new_label();
1326 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1328 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1331 /* Branch not taken. */
1334 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1335 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1339 tcg_gen_movi_i64(psw_addr
, dest
);
1341 per_breaking_event(s
);
1342 ret
= EXIT_PC_UPDATED
;
1345 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1346 Most commonly we're single-stepping or some other condition that
1347 disables all use of goto_tb. Just update the PC and exit. */
1349 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1351 cdest
= tcg_const_i64(dest
);
1355 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1357 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1359 TCGv_i32 t0
= tcg_temp_new_i32();
1360 TCGv_i64 t1
= tcg_temp_new_i64();
1361 TCGv_i64 z
= tcg_const_i64(0);
1362 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1363 tcg_gen_extu_i32_i64(t1
, t0
);
1364 tcg_temp_free_i32(t0
);
1365 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1366 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1367 tcg_temp_free_i64(t1
);
1368 tcg_temp_free_i64(z
);
1372 tcg_temp_free_i64(cdest
);
1374 tcg_temp_free_i64(next
);
1376 ret
= EXIT_PC_UPDATED
;
1384 /* ====================================================================== */
1385 /* The operations. These perform the bulk of the work for any insn,
1386 usually after the operands have been loaded and output initialized. */
1388 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1391 z
= tcg_const_i64(0);
1392 n
= tcg_temp_new_i64();
1393 tcg_gen_neg_i64(n
, o
->in2
);
1394 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1395 tcg_temp_free_i64(n
);
1396 tcg_temp_free_i64(z
);
1400 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1402 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1406 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1408 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1412 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1414 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1415 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1419 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1421 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1425 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1430 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1432 /* The carry flag is the msb of CC, therefore the branch mask that would
1433 create that comparison is 3. Feeding the generated comparison to
1434 setcond produces the carry flag that we desire. */
1435 disas_jcc(s
, &cmp
, 3);
1436 carry
= tcg_temp_new_i64();
1438 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1440 TCGv_i32 t
= tcg_temp_new_i32();
1441 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1442 tcg_gen_extu_i32_i64(carry
, t
);
1443 tcg_temp_free_i32(t
);
1447 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1448 tcg_temp_free_i64(carry
);
1452 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1454 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1458 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1460 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1464 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1466 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1467 return_low128(o
->out2
);
1471 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1473 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1477 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1479 int shift
= s
->insn
->data
& 0xff;
1480 int size
= s
->insn
->data
>> 8;
1481 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1484 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1485 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1486 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1488 /* Produce the CC from only the bits manipulated. */
1489 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1490 set_cc_nz_u64(s
, cc_dst
);
1494 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1496 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1497 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1498 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1499 per_branch(s
, false);
1500 return EXIT_PC_UPDATED
;
1506 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1508 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1509 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1512 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1514 int m1
= get_field(s
->fields
, m1
);
1515 bool is_imm
= have_field(s
->fields
, i2
);
1516 int imm
= is_imm ?
get_field(s
->fields
, i2
) : 0;
1519 disas_jcc(s
, &c
, m1
);
1520 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1523 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1525 int r1
= get_field(s
->fields
, r1
);
1526 bool is_imm
= have_field(s
->fields
, i2
);
1527 int imm
= is_imm ?
get_field(s
->fields
, i2
) : 0;
1531 c
.cond
= TCG_COND_NE
;
1536 t
= tcg_temp_new_i64();
1537 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1538 store_reg32_i64(r1
, t
);
1539 c
.u
.s32
.a
= tcg_temp_new_i32();
1540 c
.u
.s32
.b
= tcg_const_i32(0);
1541 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1542 tcg_temp_free_i64(t
);
1544 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1547 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1549 int r1
= get_field(s
->fields
, r1
);
1550 int imm
= get_field(s
->fields
, i2
);
1554 c
.cond
= TCG_COND_NE
;
1559 t
= tcg_temp_new_i64();
1560 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1561 tcg_gen_subi_i64(t
, t
, 1);
1562 store_reg32h_i64(r1
, t
);
1563 c
.u
.s32
.a
= tcg_temp_new_i32();
1564 c
.u
.s32
.b
= tcg_const_i32(0);
1565 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1566 tcg_temp_free_i64(t
);
1568 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1571 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1573 int r1
= get_field(s
->fields
, r1
);
1574 bool is_imm
= have_field(s
->fields
, i2
);
1575 int imm
= is_imm ?
get_field(s
->fields
, i2
) : 0;
1578 c
.cond
= TCG_COND_NE
;
1583 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1584 c
.u
.s64
.a
= regs
[r1
];
1585 c
.u
.s64
.b
= tcg_const_i64(0);
1587 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1590 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1592 int r1
= get_field(s
->fields
, r1
);
1593 int r3
= get_field(s
->fields
, r3
);
1594 bool is_imm
= have_field(s
->fields
, i2
);
1595 int imm
= is_imm ?
get_field(s
->fields
, i2
) : 0;
1599 c
.cond
= (s
->insn
->data ? TCG_COND_LE
: TCG_COND_GT
);
1604 t
= tcg_temp_new_i64();
1605 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1606 c
.u
.s32
.a
= tcg_temp_new_i32();
1607 c
.u
.s32
.b
= tcg_temp_new_i32();
1608 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1609 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1610 store_reg32_i64(r1
, t
);
1611 tcg_temp_free_i64(t
);
1613 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1616 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1618 int r1
= get_field(s
->fields
, r1
);
1619 int r3
= get_field(s
->fields
, r3
);
1620 bool is_imm
= have_field(s
->fields
, i2
);
1621 int imm
= is_imm ?
get_field(s
->fields
, i2
) : 0;
1624 c
.cond
= (s
->insn
->data ? TCG_COND_LE
: TCG_COND_GT
);
1627 if (r1
== (r3
| 1)) {
1628 c
.u
.s64
.b
= load_reg(r3
| 1);
1631 c
.u
.s64
.b
= regs
[r3
| 1];
1635 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1636 c
.u
.s64
.a
= regs
[r1
];
1639 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1642 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1644 int imm
, m3
= get_field(s
->fields
, m3
);
1648 c
.cond
= ltgt_cond
[m3
];
1649 if (s
->insn
->data
) {
1650 c
.cond
= tcg_unsigned_cond(c
.cond
);
1652 c
.is_64
= c
.g1
= c
.g2
= true;
1656 is_imm
= have_field(s
->fields
, i4
);
1658 imm
= get_field(s
->fields
, i4
);
1661 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1662 get_field(s
->fields
, d4
));
1665 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1668 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1670 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1675 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1677 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1682 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1684 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1689 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1691 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1692 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1693 tcg_temp_free_i32(m3
);
1694 gen_set_cc_nz_f32(s
, o
->in2
);
1698 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1700 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1701 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1702 tcg_temp_free_i32(m3
);
1703 gen_set_cc_nz_f64(s
, o
->in2
);
1707 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1709 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1710 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1711 tcg_temp_free_i32(m3
);
1712 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1716 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1718 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1719 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1720 tcg_temp_free_i32(m3
);
1721 gen_set_cc_nz_f32(s
, o
->in2
);
1725 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1727 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1728 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1729 tcg_temp_free_i32(m3
);
1730 gen_set_cc_nz_f64(s
, o
->in2
);
1734 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1736 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1737 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1738 tcg_temp_free_i32(m3
);
1739 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1743 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1745 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1746 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1747 tcg_temp_free_i32(m3
);
1748 gen_set_cc_nz_f32(s
, o
->in2
);
1752 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1754 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1755 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1756 tcg_temp_free_i32(m3
);
1757 gen_set_cc_nz_f64(s
, o
->in2
);
1761 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1763 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1764 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1765 tcg_temp_free_i32(m3
);
1766 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1770 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1772 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1773 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1774 tcg_temp_free_i32(m3
);
1775 gen_set_cc_nz_f32(s
, o
->in2
);
1779 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1782 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1783 tcg_temp_free_i32(m3
);
1784 gen_set_cc_nz_f64(s
, o
->in2
);
1788 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1790 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1791 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1792 tcg_temp_free_i32(m3
);
1793 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1797 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1805 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1813 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1815 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1816 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1817 tcg_temp_free_i32(m3
);
1818 return_low128(o
->out2
);
1822 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1824 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1825 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1826 tcg_temp_free_i32(m3
);
1830 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1833 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1834 tcg_temp_free_i32(m3
);
1838 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1840 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1841 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1842 tcg_temp_free_i32(m3
);
1843 return_low128(o
->out2
);
1847 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1849 int r2
= get_field(s
->fields
, r2
);
1850 TCGv_i64 len
= tcg_temp_new_i64();
1852 potential_page_fault(s
);
1853 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1855 return_low128(o
->out
);
1857 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1858 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1859 tcg_temp_free_i64(len
);
1864 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1866 int l
= get_field(s
->fields
, l1
);
1871 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1872 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1875 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1876 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1879 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1880 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1883 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1884 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1887 potential_page_fault(s
);
1888 vl
= tcg_const_i32(l
);
1889 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1890 tcg_temp_free_i32(vl
);
1894 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1898 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1900 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1901 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1902 potential_page_fault(s
);
1903 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1904 tcg_temp_free_i32(r1
);
1905 tcg_temp_free_i32(r3
);
1910 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1912 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1913 TCGv_i32 t1
= tcg_temp_new_i32();
1914 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1915 potential_page_fault(s
);
1916 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1918 tcg_temp_free_i32(t1
);
1919 tcg_temp_free_i32(m3
);
1923 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1925 potential_page_fault(s
);
1926 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1928 return_low128(o
->in2
);
1932 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1934 TCGv_i64 t
= tcg_temp_new_i64();
1935 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1936 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1937 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1938 tcg_temp_free_i64(t
);
1942 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1944 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1945 int d2
= get_field(s
->fields
, d2
);
1946 int b2
= get_field(s
->fields
, b2
);
1947 int is_64
= s
->insn
->data
;
1948 TCGv_i64 addr
, mem
, cc
, z
;
1950 /* Note that in1 = R3 (new value) and
1951 in2 = (zero-extended) R1 (expected value). */
1953 /* Load the memory into the (temporary) output. While the PoO only talks
1954 about moving the memory to R1 on inequality, if we include equality it
1955 means that R1 is equal to the memory in all conditions. */
1956 addr
= get_address(s
, 0, b2
, d2
);
1958 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1960 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1963 /* Are the memory and expected values (un)equal? Note that this setcond
1964 produces the output CC value, thus the NE sense of the test. */
1965 cc
= tcg_temp_new_i64();
1966 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1968 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1969 Recall that we are allowed to unconditionally issue the store (and
1970 thus any possible write trap), so (re-)store the original contents
1971 of MEM in case of inequality. */
1972 z
= tcg_const_i64(0);
1973 mem
= tcg_temp_new_i64();
1974 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1976 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1978 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1980 tcg_temp_free_i64(z
);
1981 tcg_temp_free_i64(mem
);
1982 tcg_temp_free_i64(addr
);
1984 /* Store CC back to cc_op. Wait until after the store so that any
1985 exception gets the old cc_op value. */
1986 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1987 tcg_temp_free_i64(cc
);
1992 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1994 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1995 int r1
= get_field(s
->fields
, r1
);
1996 int r3
= get_field(s
->fields
, r3
);
1997 int d2
= get_field(s
->fields
, d2
);
1998 int b2
= get_field(s
->fields
, b2
);
1999 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
2001 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2003 addrh
= get_address(s
, 0, b2
, d2
);
2004 addrl
= get_address(s
, 0, b2
, d2
+ 8);
2005 outh
= tcg_temp_new_i64();
2006 outl
= tcg_temp_new_i64();
2008 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
2009 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
2011 /* Fold the double-word compare with arithmetic. */
2012 cc
= tcg_temp_new_i64();
2013 z
= tcg_temp_new_i64();
2014 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
2015 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
2016 tcg_gen_or_i64(cc
, cc
, z
);
2017 tcg_gen_movi_i64(z
, 0);
2018 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
2020 memh
= tcg_temp_new_i64();
2021 meml
= tcg_temp_new_i64();
2022 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
2023 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
2024 tcg_temp_free_i64(z
);
2026 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
2027 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
2028 tcg_temp_free_i64(memh
);
2029 tcg_temp_free_i64(meml
);
2030 tcg_temp_free_i64(addrh
);
2031 tcg_temp_free_i64(addrl
);
2033 /* Save back state now that we've passed all exceptions. */
2034 tcg_gen_mov_i64(regs
[r1
], outh
);
2035 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
2036 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2037 tcg_temp_free_i64(outh
);
2038 tcg_temp_free_i64(outl
);
2039 tcg_temp_free_i64(cc
);
2044 #ifndef CONFIG_USER_ONLY
2045 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
2047 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2048 check_privileged(s
);
2049 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
2050 tcg_temp_free_i32(r1
);
2056 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2058 TCGv_i64 t1
= tcg_temp_new_i64();
2059 TCGv_i32 t2
= tcg_temp_new_i32();
2060 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2061 gen_helper_cvd(t1
, t2
);
2062 tcg_temp_free_i32(t2
);
2063 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2064 tcg_temp_free_i64(t1
);
2068 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2070 int m3
= get_field(s
->fields
, m3
);
2071 TCGLabel
*lab
= gen_new_label();
2074 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2075 if (s
->insn
->data
) {
2076 c
= tcg_unsigned_cond(c
);
2078 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2087 #ifndef CONFIG_USER_ONLY
2088 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2090 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2091 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2092 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2094 check_privileged(s
);
2098 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2100 tcg_temp_free_i32(func_code
);
2101 tcg_temp_free_i32(r3
);
2102 tcg_temp_free_i32(r1
);
2107 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2109 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2110 return_low128(o
->out
);
2114 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2116 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2117 return_low128(o
->out
);
2121 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2123 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2124 return_low128(o
->out
);
2128 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2130 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2131 return_low128(o
->out
);
2135 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2137 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2141 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2143 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2147 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2149 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2150 return_low128(o
->out2
);
2154 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2156 int r2
= get_field(s
->fields
, r2
);
2157 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2161 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2163 /* No cache information provided. */
2164 tcg_gen_movi_i64(o
->out
, -1);
2168 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2170 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2174 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2176 int r1
= get_field(s
->fields
, r1
);
2177 int r2
= get_field(s
->fields
, r2
);
2178 TCGv_i64 t
= tcg_temp_new_i64();
2180 /* Note the "subsequently" in the PoO, which implies a defined result
2181 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2182 tcg_gen_shri_i64(t
, psw_mask
, 32);
2183 store_reg32_i64(r1
, t
);
2185 store_reg32_i64(r2
, psw_mask
);
2188 tcg_temp_free_i64(t
);
2192 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2194 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2195 tb->flags, (ab)use the tb->cs_base field as the address of
2196 the template in memory, and grab 8 bits of tb->flags/cflags for
2197 the contents of the register. We would then recognize all this
2198 in gen_intermediate_code_internal, generating code for exactly
2199 one instruction. This new TB then gets executed normally.
2201 On the other hand, this seems to be mostly used for modifying
2202 MVC inside of memcpy, which needs a helper call anyway. So
2203 perhaps this doesn't bear thinking about any further. */
2210 tmp
= tcg_const_i64(s
->next_pc
);
2211 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2212 tcg_temp_free_i64(tmp
);
2217 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2219 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2220 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2221 tcg_temp_free_i32(m3
);
2225 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2227 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2228 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2229 tcg_temp_free_i32(m3
);
2233 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2235 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2236 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2237 return_low128(o
->out2
);
2238 tcg_temp_free_i32(m3
);
2242 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2244 /* We'll use the original input for cc computation, since we get to
2245 compare that against 0, which ought to be better than comparing
2246 the real output against 64. It also lets cc_dst be a convenient
2247 temporary during our computation. */
2248 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2250 /* R1 = IN ? CLZ(IN) : 64. */
2251 gen_helper_clz(o
->out
, o
->in2
);
2253 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2254 value by 64, which is undefined. But since the shift is 64 iff the
2255 input is zero, we still get the correct result after and'ing. */
2256 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2257 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2258 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2262 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2264 int m3
= get_field(s
->fields
, m3
);
2265 int pos
, len
, base
= s
->insn
->data
;
2266 TCGv_i64 tmp
= tcg_temp_new_i64();
2271 /* Effectively a 32-bit load. */
2272 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2279 /* Effectively a 16-bit load. */
2280 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2288 /* Effectively an 8-bit load. */
2289 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2294 pos
= base
+ ctz32(m3
) * 8;
2295 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2296 ccm
= ((1ull << len
) - 1) << pos
;
2300 /* This is going to be a sequence of loads and inserts. */
2301 pos
= base
+ 32 - 8;
2305 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2306 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2307 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2310 m3
= (m3
<< 1) & 0xf;
2316 tcg_gen_movi_i64(tmp
, ccm
);
2317 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2318 tcg_temp_free_i64(tmp
);
2322 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2324 int shift
= s
->insn
->data
& 0xff;
2325 int size
= s
->insn
->data
>> 8;
2326 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2330 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2335 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2337 t1
= tcg_temp_new_i64();
2338 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2339 tcg_gen_shri_i64(t1
, t1
, 36);
2340 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2342 tcg_gen_extu_i32_i64(t1
, cc_op
);
2343 tcg_gen_shli_i64(t1
, t1
, 28);
2344 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2345 tcg_temp_free_i64(t1
);
2349 #ifndef CONFIG_USER_ONLY
2350 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2352 check_privileged(s
);
2353 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2357 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2359 check_privileged(s
);
2360 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2365 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2367 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2371 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2373 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2377 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2379 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2383 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2385 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2389 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2391 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2392 return_low128(o
->out2
);
2396 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2398 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2399 return_low128(o
->out2
);
2403 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2405 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2409 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2411 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2415 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2417 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2421 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2423 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2427 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2429 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2433 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2435 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2439 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2441 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2445 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2447 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2451 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2453 TCGLabel
*lab
= gen_new_label();
2454 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2455 /* The value is stored even in case of trap. */
2456 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2462 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2464 TCGLabel
*lab
= gen_new_label();
2465 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2466 /* The value is stored even in case of trap. */
2467 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2473 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2475 TCGLabel
*lab
= gen_new_label();
2476 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2477 /* The value is stored even in case of trap. */
2478 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2484 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2486 TCGLabel
*lab
= gen_new_label();
2487 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2488 /* The value is stored even in case of trap. */
2489 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2495 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2497 TCGLabel
*lab
= gen_new_label();
2498 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2499 /* The value is stored even in case of trap. */
2500 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2506 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2510 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2513 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2517 TCGv_i32 t32
= tcg_temp_new_i32();
2520 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2523 t
= tcg_temp_new_i64();
2524 tcg_gen_extu_i32_i64(t
, t32
);
2525 tcg_temp_free_i32(t32
);
2527 z
= tcg_const_i64(0);
2528 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2529 tcg_temp_free_i64(t
);
2530 tcg_temp_free_i64(z
);
2536 #ifndef CONFIG_USER_ONLY
2537 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2539 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2540 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2541 check_privileged(s
);
2542 potential_page_fault(s
);
2543 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2544 tcg_temp_free_i32(r1
);
2545 tcg_temp_free_i32(r3
);
2549 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2551 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2552 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2553 check_privileged(s
);
2554 potential_page_fault(s
);
2555 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2556 tcg_temp_free_i32(r1
);
2557 tcg_temp_free_i32(r3
);
2560 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2562 check_privileged(s
);
2563 potential_page_fault(s
);
2564 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2569 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2573 check_privileged(s
);
2574 per_breaking_event(s
);
2576 t1
= tcg_temp_new_i64();
2577 t2
= tcg_temp_new_i64();
2578 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2579 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2580 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2581 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2582 tcg_gen_shli_i64(t1
, t1
, 32);
2583 gen_helper_load_psw(cpu_env
, t1
, t2
);
2584 tcg_temp_free_i64(t1
);
2585 tcg_temp_free_i64(t2
);
2586 return EXIT_NORETURN
;
2589 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2593 check_privileged(s
);
2594 per_breaking_event(s
);
2596 t1
= tcg_temp_new_i64();
2597 t2
= tcg_temp_new_i64();
2598 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2599 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2600 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2601 gen_helper_load_psw(cpu_env
, t1
, t2
);
2602 tcg_temp_free_i64(t1
);
2603 tcg_temp_free_i64(t2
);
2604 return EXIT_NORETURN
;
2608 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2610 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2611 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2612 potential_page_fault(s
);
2613 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2614 tcg_temp_free_i32(r1
);
2615 tcg_temp_free_i32(r3
);
2619 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2621 int r1
= get_field(s
->fields
, r1
);
2622 int r3
= get_field(s
->fields
, r3
);
2625 /* Only one register to read. */
2626 t1
= tcg_temp_new_i64();
2627 if (unlikely(r1
== r3
)) {
2628 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2629 store_reg32_i64(r1
, t1
);
2634 /* First load the values of the first and last registers to trigger
2635 possible page faults. */
2636 t2
= tcg_temp_new_i64();
2637 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2638 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2639 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2640 store_reg32_i64(r1
, t1
);
2641 store_reg32_i64(r3
, t2
);
2643 /* Only two registers to read. */
2644 if (((r1
+ 1) & 15) == r3
) {
2650 /* Then load the remaining registers. Page fault can't occur. */
2652 tcg_gen_movi_i64(t2
, 4);
2655 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2656 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2657 store_reg32_i64(r1
, t1
);
2665 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2667 int r1
= get_field(s
->fields
, r1
);
2668 int r3
= get_field(s
->fields
, r3
);
2671 /* Only one register to read. */
2672 t1
= tcg_temp_new_i64();
2673 if (unlikely(r1
== r3
)) {
2674 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2675 store_reg32h_i64(r1
, t1
);
2680 /* First load the values of the first and last registers to trigger
2681 possible page faults. */
2682 t2
= tcg_temp_new_i64();
2683 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2684 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2685 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2686 store_reg32h_i64(r1
, t1
);
2687 store_reg32h_i64(r3
, t2
);
2689 /* Only two registers to read. */
2690 if (((r1
+ 1) & 15) == r3
) {
2696 /* Then load the remaining registers. Page fault can't occur. */
2698 tcg_gen_movi_i64(t2
, 4);
2701 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2702 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2703 store_reg32h_i64(r1
, t1
);
2711 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2713 int r1
= get_field(s
->fields
, r1
);
2714 int r3
= get_field(s
->fields
, r3
);
2717 /* Only one register to read. */
2718 if (unlikely(r1
== r3
)) {
2719 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2723 /* First load the values of the first and last registers to trigger
2724 possible page faults. */
2725 t1
= tcg_temp_new_i64();
2726 t2
= tcg_temp_new_i64();
2727 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2728 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2729 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2730 tcg_gen_mov_i64(regs
[r1
], t1
);
2733 /* Only two registers to read. */
2734 if (((r1
+ 1) & 15) == r3
) {
2739 /* Then load the remaining registers. Page fault can't occur. */
2741 tcg_gen_movi_i64(t1
, 8);
2744 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2745 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2752 #ifndef CONFIG_USER_ONLY
2753 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2755 check_privileged(s
);
2756 potential_page_fault(s
);
2757 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2761 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2763 check_privileged(s
);
2764 potential_page_fault(s
);
2765 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2770 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2773 o
->g_out
= o
->g_in2
;
2774 TCGV_UNUSED_I64(o
->in2
);
2779 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2781 int b2
= get_field(s
->fields
, b2
);
2782 TCGv ar1
= tcg_temp_new_i64();
2785 o
->g_out
= o
->g_in2
;
2786 TCGV_UNUSED_I64(o
->in2
);
2789 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2790 case PSW_ASC_PRIMARY
>> 32:
2791 tcg_gen_movi_i64(ar1
, 0);
2793 case PSW_ASC_ACCREG
>> 32:
2794 tcg_gen_movi_i64(ar1
, 1);
2796 case PSW_ASC_SECONDARY
>> 32:
2798 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2800 tcg_gen_movi_i64(ar1
, 0);
2803 case PSW_ASC_HOME
>> 32:
2804 tcg_gen_movi_i64(ar1
, 2);
2808 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2809 tcg_temp_free_i64(ar1
);
2814 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2818 o
->g_out
= o
->g_in1
;
2819 o
->g_out2
= o
->g_in2
;
2820 TCGV_UNUSED_I64(o
->in1
);
2821 TCGV_UNUSED_I64(o
->in2
);
2822 o
->g_in1
= o
->g_in2
= false;
2826 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2828 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2829 potential_page_fault(s
);
2830 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2831 tcg_temp_free_i32(l
);
2835 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2837 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2838 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2839 potential_page_fault(s
);
2840 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2841 tcg_temp_free_i32(r1
);
2842 tcg_temp_free_i32(r2
);
2847 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2849 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2850 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2851 potential_page_fault(s
);
2852 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2853 tcg_temp_free_i32(r1
);
2854 tcg_temp_free_i32(r3
);
2859 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary (privileged).  Note the R1 field of this SS-d
   format instruction occupies the l1 slot of the decoded fields, so it is
   fetched with get_field(..., l1).  The helper sets the condition code.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MVCS: move to secondary (privileged).  As with MVCP, the R1 register
   number lives in the l1 field of the decoded instruction.  The helper
   sets the condition code via cc_op.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MVPG: move page.  regs[0] carries the access/key options per the insn
   definition; the helper stores the resulting condition code in env,
   which set_cc_static then picks up.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MVST: move string.  regs[0] holds the terminating byte.  The helper
   returns the updated first-operand address in in1 and the second-operand
   address in the low 128 return slot (return_low128 -> in2), and sets the
   condition code.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    return_low128(o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Multiply: low 64 bits of in1 * in2.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Multiply producing a full 128-bit result: low half in out2, high half
   in out (unsigned widening multiply).  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* MEEB: BFP multiply, 32-bit operands; delegated to the softfloat helper.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MDEB: BFP multiply, 32-bit operands widening to a 64-bit result.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MDB: BFP multiply, 64-bit operands.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MXB: BFP multiply, 128-bit operands.  The first operand pair is passed
   in out/out2; the 128-bit result comes back in out plus the low-half
   return slot, fetched with return_low128.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MXDB: BFP multiply, 64-bit operand widening to a 128-bit result
   delivered in out plus the low-half return slot.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2942 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2944 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2945 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2946 tcg_temp_free_i64(r3
);
2950 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2952 int r3
= get_field(s
->fields
, r3
);
2953 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2957 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2959 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2960 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2961 tcg_temp_free_i64(r3
);
2965 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2967 int r3
= get_field(s
->fields
, r3
);
2968 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
/* Load negative: out = -|in2|.  Compute the negation unconditionally, then
   use movcond to pick the negated value when in2 >= 0 and in2 itself when
   it is already negative.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;

    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* out = (in2 >= 0 ? n : in2)  */
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* Load negative, 32-bit float: force the sign bit (bit 31) on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
/* Load negative, 64-bit float: force the sign bit (bit 63) on.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
/* Load negative, 128-bit float: the sign lives in the high doubleword
   (in1); the low doubleword (in2) is copied through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3003 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3005 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3006 potential_page_fault(s
);
3007 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3008 tcg_temp_free_i32(l
);
/* Load complement: out = -in2 (two's complement negate).  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
/* Load complement, 32-bit float: flip the sign bit (bit 31).  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
/* Load complement, 64-bit float: flip the sign bit (bit 63).  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
/* Load complement, 128-bit float: flip the sign bit in the high
   doubleword (in1) and copy the low doubleword (in2) through.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3038 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3040 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3041 potential_page_fault(s
);
3042 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3043 tcg_temp_free_i32(l
);
/* OR: out = in1 | in2.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* OR immediate into a sub-field of the register.  The insn->data word
   packs the field position in the low byte and its width in the next byte;
   the immediate in in2 is shifted into position before the OR.  The
   condition code is derived from only the bits touched by the mask.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a private temporary since we modify it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* POPCNT: per-byte population count, delegated to the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3076 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB (privileged); entirely handled by the helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
/* RISBG / RISBHG / RISBLG: rotate then insert selected bits.
   I3/I4 give the first/last bit of the destination field (I4 bit 0x80 is
   the zero-remaining-bits flag), I5 is the rotate amount.  PMASK restricts
   the operation to the whole register (risbg), the high half (risbhg), or
   the low half (risblg).
   NOTE(review): several structural lines were lost in extraction and have
   been restored from the upstream implementation — verify against the
   pristine file before relying on the exact case bodies.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0ull;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts.  */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3168 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3170 int i3
= get_field(s
->fields
, i3
);
3171 int i4
= get_field(s
->fields
, i4
);
3172 int i5
= get_field(s
->fields
, i5
);
3175 /* If this is a test-only form, arrange to discard the result. */
3177 o
->out
= tcg_temp_new_i64();
3185 /* MASK is the set of bits to be operated on from R2.
3186 Take care for I3/I4 wraparound. */
3189 mask
^= ~0ull >> i4
>> 1;
3191 mask
|= ~(~0ull >> i4
>> 1);
3194 /* Rotate the input as necessary. */
3195 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3198 switch (s
->fields
->op2
) {
3199 case 0x55: /* AND */
3200 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3201 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3204 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3205 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3207 case 0x57: /* XOR */
3208 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3209 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);