s390x/tcg: Implement MULTIPLY SINGLE (MSC, MSGC, MSGRKC, MSRKC)
[qemu.git] / target / s390x / cc_helper.c
1 /*
2 * S/390 condition code helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "tcg_s390x.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "qemu/host-utils.h"
28
29 /* #define DEBUG_HELPER */
30 #ifdef DEBUG_HELPER
31 #define HELPER_LOG(x...) qemu_log(x)
32 #else
33 #define HELPER_LOG(x...)
34 #endif
35
/* Signed 32-bit compare: CC 0 if equal, 1 if src < dst, 2 if src > dst. */
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src < dst) {
        return 1;
    }
    return src > dst ? 2 : 0;
}
46
/* Signed 32-bit compare against zero: CC 0 zero, 1 negative, 2 positive. */
static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
51
/* Signed 64-bit compare: CC 0 if equal, 1 if src < dst, 2 if src > dst. */
static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src < dst) {
        return 1;
    }
    return src > dst ? 2 : 0;
}
62
/* Signed 64-bit compare against zero: CC 0 zero, 1 negative, 2 positive. */
static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
67
/* Unsigned 32-bit compare: CC 0 equal, 1 low, 2 high. */
static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    return src == dst ? 0 : (src < dst ? 1 : 2);
}
78
/* Unsigned 64-bit compare: CC 0 equal, 1 low, 2 high. */
static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    return src == dst ? 0 : (src < dst ? 1 : 2);
}
89
/*
 * TEST UNDER MASK, 32-bit selection:
 * CC 0 all selected bits zero, 3 all selected bits one, 1 mixed.
 */
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t sel = val & mask;

    if (sel == 0) {
        return 0;
    }
    return sel == mask ? 3 : 1;
}
102
/*
 * TEST UNDER MASK, 64-bit selection:
 * CC 0 all selected bits zero, 3 all selected bits one; when mixed,
 * CC 2 if the leftmost selected bit is one, else CC 1.
 */
static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t sel = val & mask;

    if (sel == 0) {
        return 0;
    }
    if (sel == mask) {
        return 3;
    }
    /* Mixed: align the leftmost selected bit with the sign position. */
    return (int64_t)(val << clz64(mask)) < 0 ? 2 : 1;
}
120
/* CC 0 when zero, CC 1 when non-zero. */
static uint32_t cc_calc_nz(uint64_t dst)
{
    return dst != 0;
}
125
/*
 * Signed 64-bit add: CC 3 on overflow, otherwise CC 0/1/2 for
 * zero/negative/positive result.
 */
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    bool ovf = (a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0);

    if (ovf) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
140
/* Unsigned 64-bit add: bit 1 of CC = result non-zero, bit 2 = carry out. */
static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    uint32_t cc = (ar < a1) ? 2 : 0;    /* carry out of bit 63 */

    if (ar != 0) {
        cc += 1;
    }
    return cc;
}
145
/*
 * Unsigned 64-bit add-with-carry.  Only the final sum is available, so
 * the intermediate a2 + carry_in is reconstructed to detect the carry.
 */
static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    uint64_t partial = ar - a1;                       /* a2 + carry_in */
    bool carry = (partial < a2) || (ar < a1);         /* either step carried */

    return (ar != 0) + 2 * carry;
}
155
/*
 * Signed 64-bit subtract (ar = a1 - a2): CC 3 on overflow, otherwise
 * CC 0/1/2 for zero/negative/positive result.
 */
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    bool ovf = (a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0);

    if (ovf) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
170
/*
 * Unsigned 64-bit subtract: CC 2 zero result, CC 1 non-zero with borrow,
 * CC 3 non-zero without borrow.
 */
static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    if (ar == 0) {
        return 2;
    }
    return a2 > a1 ? 1 : 3;
}
183
/*
 * Unsigned 64-bit subtract-with-borrow.  Borrow-in is inferred from the
 * result differing from the plain difference a1 - a2.
 */
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    bool borrow_in = (ar != a1 - a2);
    bool borrow_out = borrow_in ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + 2 * !borrow_out;
}
196
/* LOAD POSITIVE (64-bit): CC 3 on |INT64_MIN| overflow, 2 non-zero, 0 zero. */
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == (1ULL << 63)) {
        return 3;
    }
    return dst != 0 ? 2 : 0;
}
207
/* LOAD NEGATIVE (64-bit): CC 0 zero, CC 1 non-zero (never overflows). */
static uint32_t cc_calc_nabs_64(int64_t dst)
{
    return dst != 0;
}
212
/* LOAD COMPLEMENT (64-bit): CC 3 on INT64_MIN overflow, else 0/1/2. */
static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == (1ULL << 63)) {
        return 3;
    }
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
225
226
/*
 * Signed 32-bit add: CC 3 on overflow, otherwise CC 0/1/2 for
 * zero/negative/positive result.
 */
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    bool ovf = (a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0);

    if (ovf) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
241
/* Unsigned 32-bit add: bit 1 of CC = result non-zero, bit 2 = carry out. */
static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    uint32_t cc = (ar < a1) ? 2 : 0;    /* carry out of bit 31 */

    if (ar != 0) {
        cc += 1;
    }
    return cc;
}
246
/*
 * Unsigned 32-bit add-with-carry.  Only the final sum is available, so
 * the intermediate a2 + carry_in is reconstructed to detect the carry.
 */
static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    uint32_t partial = ar - a1;                       /* a2 + carry_in */
    bool carry = (partial < a2) || (ar < a1);         /* either step carried */

    return (ar != 0) + 2 * carry;
}
256
/*
 * Signed 32-bit subtract (ar = a1 - a2): CC 3 on overflow, otherwise
 * CC 0/1/2 for zero/negative/positive result.
 */
static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    bool ovf = (a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0);

    if (ovf) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
271
/*
 * Unsigned 32-bit subtract: CC 2 zero result, CC 1 non-zero with borrow,
 * CC 3 non-zero without borrow.
 */
static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    if (ar == 0) {
        return 2;
    }
    return a2 > a1 ? 1 : 3;
}
284
/*
 * Unsigned 32-bit subtract-with-borrow.  Borrow-in is inferred from the
 * result differing from the plain difference a1 - a2.
 */
static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    bool borrow_in = (ar != a1 - a2);
    bool borrow_out = borrow_in ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + 2 * !borrow_out;
}
297
/* LOAD POSITIVE (32-bit): CC 3 on |INT32_MIN| overflow, 2 non-zero, 0 zero. */
static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == (1U << 31)) {
        return 3;
    }
    return dst != 0 ? 2 : 0;
}
308
/* LOAD NEGATIVE (32-bit): CC 0 zero, CC 1 non-zero (never overflows). */
static uint32_t cc_calc_nabs_32(int32_t dst)
{
    return dst != 0;
}
313
/* LOAD COMPLEMENT (32-bit): CC 3 on INT32_MIN overflow, else 0/1/2. */
static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == (1U << 31)) {
        return 3;
    }
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
326
/*
 * INSERT CHARACTERS UNDER MASK: CC 0 if all inserted bits are zero;
 * otherwise CC 1 if the leftmost inserted bit is one, else CC 2.
 */
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    uint64_t sel = val & mask;

    if (sel == 0) {
        return 0;
    }
    /* Align the leftmost masked bit with the sign position. */
    return (int64_t)(val << clz64(mask)) < 0 ? 1 : 2;
}
341
/*
 * SHIFT LEFT SINGLE (32-bit): CC 3 if any bit shifted out of the numeric
 * part differs from the sign; otherwise CC 0/1/2 for zero/negative/
 * positive result.
 *
 * The shift amount is architecturally 0-63 even for the 32-bit form.
 * Promote the value so its sign sits at bit 63 and perform the check in
 * 64 bits: this avoids the undefined shifts the old code performed for
 * shift == 0 ("<< 32") and shift >= 32 ("1U << shift"), and also catches
 * the bit that moves into the sign position, which the old top-"shift"-
 * bits mask missed.
 */
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    uint64_t src64 = (uint64_t)src << 32;
    /* Sign bit plus every bit that will be shifted out of the magnitude. */
    uint64_t mask = -1ULL << (63 - shift);
    uint64_t sign = 1ULL << 63;
    uint64_t match = (src64 & sign) ? mask : 0;
    int64_t r;

    if ((src64 & mask) != match) {
        /* Overflow: a bit unequal to the sign was shifted out. */
        return 3;
    }

    /* The sign bit is preserved; only the magnitude is shifted. */
    r = ((src64 << shift) & ~sign) | (src64 & sign);
    if (r == 0) {
        return 0;
    }
    return r < 0 ? 1 : 2;
}
368
/*
 * SHIFT LEFT SINGLE (64-bit): CC 3 if any bit shifted out of the numeric
 * part differs from the sign; otherwise CC 0/1/2 for zero/negative/
 * positive result.
 *
 * mask covers the sign bit plus the "shift" bits that leave the numeric
 * field.  The old "((1ULL << shift) - 1) << (64 - shift)" form was
 * undefined for shift == 0 and, worse, excluded the bit that shifts into
 * the sign position, so some overflows went undetected.
 */
static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    /* -1 << (63 - shift) is well defined for any shift in 0..63. */
    uint64_t mask = -1ULL << (63 - shift);
    uint64_t sign = 1ULL << 63;
    uint64_t match = (src & sign) ? mask : 0;
    int64_t r;

    if ((src & mask) != match) {
        /* Overflow: a bit unequal to the sign was shifted out. */
        return 3;
    }

    /* The sign bit is preserved; only the magnitude is shifted. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    }
    return r < 0 ? 1 : 2;
}
395
/* FIND LEFTMOST ONE: CC 2 when a one bit was found, CC 0 otherwise. */
static uint32_t cc_calc_flogr(uint64_t dst)
{
    return dst != 0 ? 2 : 0;
}
400
/* LOAD COUNT TO BLOCK BOUNDARY: CC 0 for a full 16-byte count, else CC 3. */
static uint32_t cc_calc_lcbb(uint64_t dst)
{
    return dst != 16 ? 3 : 0;
}
405
/*
 * Vector compare: low/high hold per-element all-ones/all-zeros match
 * masks.  CC 0 all elements match, CC 3 none match, CC 1 mixed.
 */
static uint32_t cc_calc_vc(uint64_t low, uint64_t high)
{
    if ((high & low) == -1ull) {
        return 0;           /* every element matched */
    }
    if ((high | low) == 0) {
        return 3;           /* no element matched */
    }
    return 1;               /* some, but not all, matched */
}
419
/*
 * MULTIPLY SINGLE (32-bit): res is the full 64-bit product.  CC 3 if it
 * does not fit in 32 bits, otherwise CC 0/1/2 for zero/negative/positive.
 */
static uint32_t cc_calc_muls_32(int64_t res)
{
    if (res == 0) {
        return 0;
    }
    if (res != (int32_t)res) {
        return 3;           /* product overflows 32 bits */
    }
    return res < 0 ? 1 : 2;
}
433
/*
 * MULTIPLY SINGLE (64-bit): res_high:res_low is the 128-bit product.
 * CC 3 if it does not fit in 64 bits (high part is not the sign
 * extension of the low part), otherwise CC 0/1/2 for zero/negative/
 * positive.
 *
 * Return type changed from uint64_t to uint32_t for consistency with
 * every other cc_calc_* helper; the value is always 0-3.
 */
static uint32_t cc_calc_muls_64(int64_t res_high, uint64_t res_low)
{
    if (!res_high && !res_low) {
        return 0;
    } else if (res_high + (res_low >> 63) != 0) {
        return 3;           /* high part is not sign extension of low */
    } else if (res_high < 0) {
        return 1;
    }
    return 2;
}
445
/*
 * Compute the deferred 2-bit condition code.
 *
 * @cc_op selects the computation (one of the CC_OP_* constants); the
 * meaning of @src, @dst and @vr depends on it — e.g. for the arithmetic
 * ops @src/@dst are the inputs and @vr the result, for the unary ops only
 * @dst is used.  Aborts the CPU on an unknown @cc_op.
 */
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
                           uint64_t src, uint64_t dst, uint64_t vr)
{
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(dst);
        break;
    /* 64-bit arithmetic: src/dst are the operands, vr the result. */
    case CC_OP_ADD_64:
        r = cc_calc_add_64(src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r = cc_calc_addu_64(src, dst, vr);
        break;
    case CC_OP_ADDC_64:
        r = cc_calc_addc_64(src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r = cc_calc_subu_64(src, dst, vr);
        break;
    case CC_OP_SUBB_64:
        r = cc_calc_subb_64(src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(dst);
        break;
    case CC_OP_MULS_64:
        r = cc_calc_muls_64(src, dst);
        break;

    /* 32-bit arithmetic: src/dst are the operands, vr the result. */
    case CC_OP_ADD_32:
        r = cc_calc_add_32(src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r = cc_calc_addu_32(src, dst, vr);
        break;
    case CC_OP_ADDC_32:
        r = cc_calc_addc_32(src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r = cc_calc_subu_32(src, dst, vr);
        break;
    case CC_OP_SUBB_32:
        r = cc_calc_subb_32(src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_32(dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_32(dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(dst);
        break;
    case CC_OP_MULS_32:
        r = cc_calc_muls_32(dst);
        break;

    case CC_OP_ICM:
        r = cc_calc_icm(src, dst);
        break;
    case CC_OP_SLA_32:
        r = cc_calc_sla_32(src, dst);
        break;
    case CC_OP_SLA_64:
        r = cc_calc_sla_64(src, dst);
        break;
    case CC_OP_FLOGR:
        r = cc_calc_flogr(dst);
        break;
    case CC_OP_LCBB:
        r = cc_calc_lcbb(dst);
        break;
    case CC_OP_VC:
        r = cc_calc_vc(src, dst);
        break;

    /* Floating-point compares, implemented in fpu_helper. */
    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    case CC_OP_NZ_F128:
        r = set_cc_nz_f128(make_float128(src, dst));
        break;

    default:
        cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
               cc_name(cc_op), src, dst, vr, r);
    return r;
}
585
/* Non-TCG entry point for computing a deferred condition code. */
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
591
/* TCG helper wrapper around do_calc_cc(); see that function for details. */
uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
                         uint64_t dst, uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
597
598 #ifndef CONFIG_USER_ONLY
/*
 * TCG helper: install a new PSW (mask + address) and leave the CPU
 * execution loop so translation restarts at the new PSW address.
 */
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(env_cpu(env));
}
604
/*
 * TCG helper for SET ADDRESS SPACE CONTROL (FAST): bits a1 & 0xf00 of the
 * effective address select the PSW address-space-control setting.
 * Unhandled codes raise a specification exception.
 * NOTE(review): 0x200 (presumably access-register mode) lands in the
 * default case and traps -- confirm this is intentional.
 */
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}
627 #endif