s390x/tcg: Implement MULTIPLY SINGLE (MSC, MSGC, MSGRKC, MSRKC)
[qemu.git] / target / s390x / internal.h
1 /*
2 * s390x internal definitions and helpers
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 */
9
10 #ifndef S390X_INTERNAL_H
11 #define S390X_INTERNAL_H
12
13 #include "cpu.h"
14
15 #ifndef CONFIG_USER_ONLY
/*
 * Layout of the architected prefix ("low core") storage area.  Every
 * field offset is fixed (see the trailing per-field offset comments);
 * QEMU_PACKED plus the build-time size assert below guarantee that the
 * structure maps 1:1 onto the 8 KiB prefix page, so fields must never
 * be reordered or resized.
 */
typedef struct LowCore {
    /* prefix area: defined by architecture */
    uint32_t        ccw1[2];                  /* 0x000 */
    uint32_t        ccw2[4];                  /* 0x008 */
    uint8_t         pad1[0x80 - 0x18];        /* 0x018 */
    uint32_t        ext_params;               /* 0x080 */
    uint16_t        cpu_addr;                 /* 0x084 */
    uint16_t        ext_int_code;             /* 0x086 */
    uint16_t        svc_ilen;                 /* 0x088 */
    uint16_t        svc_code;                 /* 0x08a */
    uint16_t        pgm_ilen;                 /* 0x08c */
    uint16_t        pgm_code;                 /* 0x08e */
    uint32_t        data_exc_code;            /* 0x090 */
    uint16_t        mon_class_num;            /* 0x094 */
    uint16_t        per_perc_atmid;           /* 0x096 */
    uint64_t        per_address;              /* 0x098 */
    uint8_t         exc_access_id;            /* 0x0a0 */
    uint8_t         per_access_id;            /* 0x0a1 */
    uint8_t         op_access_id;             /* 0x0a2 */
    uint8_t         ar_access_id;             /* 0x0a3 */
    uint8_t         pad2[0xA8 - 0xA4];        /* 0x0a4 */
    uint64_t        trans_exc_code;           /* 0x0a8 */
    uint64_t        monitor_code;             /* 0x0b0 */
    uint16_t        subchannel_id;            /* 0x0b8 */
    uint16_t        subchannel_nr;            /* 0x0ba */
    uint32_t        io_int_parm;              /* 0x0bc */
    uint32_t        io_int_word;              /* 0x0c0 */
    uint8_t         pad3[0xc8 - 0xc4];        /* 0x0c4 */
    uint32_t        stfl_fac_list;            /* 0x0c8 */
    uint8_t         pad4[0xe8 - 0xcc];        /* 0x0cc */
    uint64_t        mcic;                     /* 0x0e8 */
    uint8_t         pad5[0xf4 - 0xf0];        /* 0x0f0 */
    uint32_t        external_damage_code;     /* 0x0f4 */
    uint64_t        failing_storage_address;  /* 0x0f8 */
    uint8_t         pad6[0x110 - 0x100];      /* 0x100 */
    uint64_t        per_breaking_event_addr;  /* 0x110 */
    uint8_t         pad7[0x120 - 0x118];      /* 0x118 */
    /* Old PSWs: where the interrupted context is saved on interrupt. */
    PSW             restart_old_psw;          /* 0x120 */
    PSW             external_old_psw;         /* 0x130 */
    PSW             svc_old_psw;              /* 0x140 */
    PSW             program_old_psw;          /* 0x150 */
    PSW             mcck_old_psw;             /* 0x160 */
    PSW             io_old_psw;               /* 0x170 */
    uint8_t         pad8[0x1a0 - 0x180];      /* 0x180 */
    /* New PSWs: loaded to dispatch the corresponding interrupt handler. */
    PSW             restart_new_psw;          /* 0x1a0 */
    PSW             external_new_psw;         /* 0x1b0 */
    PSW             svc_new_psw;              /* 0x1c0 */
    PSW             program_new_psw;          /* 0x1d0 */
    PSW             mcck_new_psw;             /* 0x1e0 */
    PSW             io_new_psw;               /* 0x1f0 */
    uint8_t         pad13[0x11b0 - 0x200];    /* 0x200 */

    /* machine check extended save area designation */
    uint64_t        mcesad;                   /* 0x11B0 */

    /* 64 bit extparam used for pfault, diag 250 etc  */
    uint64_t        ext_params2;              /* 0x11B8 */

    uint8_t         pad14[0x1200 - 0x11C0];   /* 0x11C0 */

    /* System info area */

    uint64_t        floating_pt_save_area[16];  /* 0x1200 */
    uint64_t        gpregs_save_area[16];       /* 0x1280 */
    uint32_t        st_status_fixed_logout[4];  /* 0x1300 */
    uint8_t         pad15[0x1318 - 0x1310];     /* 0x1310 */
    uint32_t        prefixreg_save_area;        /* 0x1318 */
    uint32_t        fpt_creg_save_area;         /* 0x131c */
    uint8_t         pad16[0x1324 - 0x1320];     /* 0x1320 */
    uint32_t        tod_progreg_save_area;      /* 0x1324 */
    uint64_t        cpu_timer_save_area;        /* 0x1328 */
    uint64_t        clock_comp_save_area;       /* 0x1330 */
    uint8_t         pad17[0x1340 - 0x1338];     /* 0x1338 */
    uint32_t        access_regs_save_area[16];  /* 0x1340 */
    uint64_t        cregs_save_area[16];        /* 0x1380 */

    /* align to the top of the prefix area */

    uint8_t         pad18[0x2000 - 0x1400];     /* 0x1400 */
} QEMU_PACKED LowCore;
QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192);
96 #endif /* CONFIG_USER_ONLY */
97
98 #define MAX_ILEN 6
99
/* The PoO defines the ILC as a number between 1 and 3; what LowCore
   actually stores is that value shifted left by one, i.e. the length
   of the insn in bytes (2, 4 or 6).  As the byte length is the more
   useful quantity, that is what QEMU passes around, under the name
   "ILEN" to keep the distinction from "ILC" explicit. */
static inline int get_ilen(uint8_t opc)
{
    /* The two most-significant opcode bits encode the length:
       00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. */
    const unsigned top2 = opc >> 6;

    if (top2 == 0) {
        return 2;
    }
    return top2 == 3 ? 6 : 4;
}
117
118 /* Compute the ATMID field that is stored in the per_perc_atmid lowcore
119 entry when a PER exception is triggered. */
120 static inline uint8_t get_per_atmid(CPUS390XState *env)
121 {
122 return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
123 (1 << 6) |
124 ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
125 ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
126 ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
127 ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
128 }
129
130 static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
131 {
132 if (!(env->psw.mask & PSW_MASK_64)) {
133 if (!(env->psw.mask & PSW_MASK_32)) {
134 /* 24-Bit mode */
135 a &= 0x00ffffff;
136 } else {
137 /* 31-Bit mode */
138 a &= 0x7fffffff;
139 }
140 }
141 return a;
142 }
143
144 /* CC optimization */
145
/* Instead of computing the condition codes after each s390x instruction,
 * QEMU just stores the result (called CC_DST), the type of operation
 * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
 * CC_VR). When the condition codes are needed, the condition codes can
 * be calculated using this information. Condition codes are not generated
 * if they are only needed for conditional branches.
 */
/* NOTE(review): the enumerator order looks load-bearing (cc_name() and
   calc_cc() in cc_helper.c are presumably indexed/dispatched by these
   values) -- do not reorder; append new entries before CC_OP_MAX. */
enum cc_op {
    CC_OP_CONST0 = 0,           /* CC is 0 */
    CC_OP_CONST1,               /* CC is 1 */
    CC_OP_CONST2,               /* CC is 2 */
    CC_OP_CONST3,               /* CC is 3 */

    CC_OP_DYNAMIC,              /* CC calculation defined by env->cc_op */
    CC_OP_STATIC,               /* CC value is env->cc_op */

    CC_OP_NZ,                   /* env->cc_dst != 0 */
    CC_OP_LTGT_32,              /* signed less/greater than (32bit) */
    CC_OP_LTGT_64,              /* signed less/greater than (64bit) */
    CC_OP_LTUGTU_32,            /* unsigned less/greater than (32bit) */
    CC_OP_LTUGTU_64,            /* unsigned less/greater than (64bit) */
    CC_OP_LTGT0_32,             /* signed less/greater than 0 (32bit) */
    CC_OP_LTGT0_64,             /* signed less/greater than 0 (64bit) */

    CC_OP_ADD_64,               /* overflow on add (64bit) */
    CC_OP_ADDU_64,              /* overflow on unsigned add (64bit) */
    CC_OP_ADDC_64,              /* overflow on unsigned add-carry (64bit) */
    CC_OP_SUB_64,               /* overflow on subtraction (64bit) */
    CC_OP_SUBU_64,              /* overflow on unsigned subtraction (64bit) */
    CC_OP_SUBB_64,              /* overflow on unsigned sub-borrow (64bit) */
    CC_OP_ABS_64,               /* sign eval on abs (64bit) */
    CC_OP_NABS_64,              /* sign eval on nabs (64bit) */
    CC_OP_MULS_64,              /* overflow on signed multiply (64bit) */

    CC_OP_ADD_32,               /* overflow on add (32bit) */
    CC_OP_ADDU_32,              /* overflow on unsigned add (32bit) */
    CC_OP_ADDC_32,              /* overflow on unsigned add-carry (32bit) */
    CC_OP_SUB_32,               /* overflow on subtraction (32bit) */
    CC_OP_SUBU_32,              /* overflow on unsigned subtraction (32bit) */
    CC_OP_SUBB_32,              /* overflow on unsigned sub-borrow (32bit) */
    CC_OP_ABS_32,               /* sign eval on abs (32bit) */
    CC_OP_NABS_32,              /* sign eval on nabs (32bit) */
    CC_OP_MULS_32,              /* overflow on signed multiply (32bit) */

    CC_OP_COMP_32,              /* complement */
    CC_OP_COMP_64,              /* complement */

    CC_OP_TM_32,                /* test under mask (32bit) */
    CC_OP_TM_64,                /* test under mask (64bit) */

    CC_OP_NZ_F32,               /* FP dst != 0 (32bit) */
    CC_OP_NZ_F64,               /* FP dst != 0 (64bit) */
    CC_OP_NZ_F128,              /* FP dst != 0 (128bit) */

    CC_OP_ICM,                  /* insert characters under mask */
    CC_OP_SLA_32,               /* Calculate shift left signed (32bit) */
    CC_OP_SLA_64,               /* Calculate shift left signed (64bit) */
    CC_OP_FLOGR,                /* find leftmost one */
    CC_OP_LCBB,                 /* load count to block boundary */
    CC_OP_VC,                   /* vector compare result */
    CC_OP_MAX
};
208
209 #ifndef CONFIG_USER_ONLY
210
211 static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
212 uint8_t *ar)
213 {
214 hwaddr addr = 0;
215 uint8_t reg;
216
217 reg = ipb >> 28;
218 if (reg > 0) {
219 addr = env->regs[reg];
220 }
221 addr += (ipb >> 16) & 0xfff;
222 if (ar) {
223 *ar = reg;
224 }
225
226 return addr;
227 }
228
229 /* Base/displacement are at the same locations. */
230 #define decode_basedisp_rs decode_basedisp_s
231
232 #endif /* CONFIG_USER_ONLY */
233
234 /* arch_dump.c */
235 int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
236 int cpuid, void *opaque);
237
238
239 /* cc_helper.c */
240 const char *cc_name(enum cc_op cc_op);
241 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
242 uint64_t vr);
243 #ifndef CONFIG_USER_ONLY
244 void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
245 #endif /* CONFIG_USER_ONLY */
246
247
248 /* cpu.c */
249 #ifndef CONFIG_USER_ONLY
250 unsigned int s390_cpu_halt(S390CPU *cpu);
251 void s390_cpu_unhalt(S390CPU *cpu);
252 #else
/* User-only stub: linux-user has no halt state to enter.  The return
 * value presumably mirrors the softmmu variant's halted-CPU count --
 * 0 meaning "not halted"; TODO confirm against cpu.c. */
static inline unsigned int s390_cpu_halt(S390CPU *cpu)
{
    return 0;
}

/* User-only stub: nothing to wake up. */
static inline void s390_cpu_unhalt(S390CPU *cpu)
{
}
261 #endif /* CONFIG_USER_ONLY */
262
263
264 /* cpu_models.c */
265 void s390_cpu_model_class_register_props(ObjectClass *oc);
266 void s390_realize_cpu_model(CPUState *cs, Error **errp);
267 ObjectClass *s390_cpu_class_by_name(const char *name);
268
269
270 /* excp_helper.c */
271 void s390x_cpu_debug_excp_handler(CPUState *cs);
272 void s390_cpu_do_interrupt(CPUState *cpu);
273 bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
274 bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
275 MMUAccessType access_type, int mmu_idx,
276 bool probe, uintptr_t retaddr);
277 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
278 MMUAccessType access_type,
279 int mmu_idx, uintptr_t retaddr);
280
281
282 /* fpu_helper.c */
283 uint32_t set_cc_nz_f32(float32 v);
284 uint32_t set_cc_nz_f64(float64 v);
285 uint32_t set_cc_nz_f128(float128 v);
286 #define S390_IEEE_MASK_INVALID 0x80
287 #define S390_IEEE_MASK_DIVBYZERO 0x40
288 #define S390_IEEE_MASK_OVERFLOW 0x20
289 #define S390_IEEE_MASK_UNDERFLOW 0x10
290 #define S390_IEEE_MASK_INEXACT 0x08
291 #define S390_IEEE_MASK_QUANTUM 0x04
292 uint8_t s390_softfloat_exc_to_ieee(unsigned int exc);
293 int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3);
294 void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode);
295 int float_comp_to_cc(CPUS390XState *env, int float_compare);
296 uint16_t float32_dcmask(CPUS390XState *env, float32 f1);
297 uint16_t float64_dcmask(CPUS390XState *env, float64 f1);
298 uint16_t float128_dcmask(CPUS390XState *env, float128 f1);
299
300
301 /* gdbstub.c */
302 int s390_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
303 int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
304 void s390_cpu_gdb_init(CPUState *cs);
305
306
307 /* helper.c */
308 void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
309 void do_restart_interrupt(CPUS390XState *env);
310 #ifndef CONFIG_USER_ONLY
311 uint64_t get_psw_mask(CPUS390XState *env);
312 void s390_cpu_recompute_watchpoints(CPUState *cs);
313 void s390x_tod_timer(void *opaque);
314 void s390x_cpu_timer(void *opaque);
315 void s390_handle_wait(S390CPU *cpu);
316 hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
317 hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
318 #define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
319 int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch);
320 int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len);
321 LowCore *cpu_map_lowcore(CPUS390XState *env);
322 void cpu_unmap_lowcore(LowCore *lowcore);
323 #endif /* CONFIG_USER_ONLY */
324
325
326 /* interrupt.c */
327 void trigger_pgm_exception(CPUS390XState *env, uint32_t code);
328 void cpu_inject_clock_comparator(S390CPU *cpu);
329 void cpu_inject_cpu_timer(S390CPU *cpu);
330 void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr);
331 int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr);
332 bool s390_cpu_has_io_int(S390CPU *cpu);
333 bool s390_cpu_has_ext_int(S390CPU *cpu);
334 bool s390_cpu_has_mcck_int(S390CPU *cpu);
335 bool s390_cpu_has_int(S390CPU *cpu);
336 bool s390_cpu_has_restart_int(S390CPU *cpu);
337 bool s390_cpu_has_stop_int(S390CPU *cpu);
338 void cpu_inject_restart(S390CPU *cpu);
339 void cpu_inject_stop(S390CPU *cpu);
340
341
342 /* ioinst.c */
343 void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
344 void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
345 void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
346 void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
347 uintptr_t ra);
348 void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
349 uintptr_t ra);
350 void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
351 void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
352 uintptr_t ra);
353 int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra);
354 void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
355 void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
356 uint32_t ipb, uintptr_t ra);
357 void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
358 void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
359 void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
360
361
362 /* mem_helper.c */
363 target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
364 void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
365 uintptr_t ra);
366
367
368 /* mmu_helper.c */
369 int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
370 target_ulong *raddr, int *flags, uint64_t *tec);
371 int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
372 target_ulong *addr, int *flags, uint64_t *tec);
373
374
375 /* misc_helper.c */
376 int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
377 void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
378 uintptr_t ra);
379
380
381 /* translate.c */
382 void s390x_translate_init(void);
383
384
385 /* sigp.c */
386 int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3);
387 void do_stop_interrupt(CPUS390XState *env);
388
389 #endif /* S390X_INTERNAL_H */