qdev: split out UI portions into a new function
[qemu.git] / target-alpha / op_helper.c
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "cpu.h"
21 #include "dyngen-exec.h"
22 #include "host-utils.h"
23 #include "softfloat.h"
24 #include "helper.h"
25 #include "sysemu.h"
26 #include "qemu-timer.h"
27
28 #define FP_STATUS (env->fp_status)
29
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
32
/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  Records the
   exception and longjmps back to the cpu main loop.  */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(env);
}
41
/* If RETADDR points into translated code, use it to recover the guest
   CPU state for the current instruction via cpu_restore_state.  A null
   RETADDR means the call came from plain C code: nothing to restore.  */
static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}
53
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.
   Unlike helper_excp, the guest PC is recovered from the host return
   address (GETPC) before exiting the cpu loop.  */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(GETPC());
    cpu_loop_exit(env);
}
62
/* Raise EXCP_ARITH with TRAP_ARG0 = exception summary bits (EXC_M_*)
   and TRAP_ARG1 = register mask (callers pass 1 << regno).  */
static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}
69
/* RPCC: read the process cycle counter.  The high 32 bits are the
   PCC_OFS offset (system mode only), the low 32 bits the counter.  */
uint64_t helper_load_pcc (void)
{
#ifndef CONFIG_USER_ONLY
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the RPCC,
       present it with a well-timed clock fixed at 250MHz.  */
    return (((uint64_t)env->pcc_ofs << 32)
            | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
#else
    /* In user-mode, vm_clock doesn't exist.  Just pass through the host cpu
       clock ticks.  Also, don't bother taking PCC_OFS into account.  */
    return (uint32_t)cpu_get_real_ticks();
#endif
}
84
/* Return the guest-visible FPCR, assembled by cpu_alpha_load_fpcr.  */
uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}
89
/* Store VAL to the FPCR; cpu_alpha_store_fpcr decomposes the fields.  */
void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}
94
95 uint64_t helper_addqv (uint64_t op1, uint64_t op2)
96 {
97 uint64_t tmp = op1;
98 op1 += op2;
99 if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
100 arith_excp(EXC_M_IOV, 0);
101 }
102 return op1;
103 }
104
105 uint64_t helper_addlv (uint64_t op1, uint64_t op2)
106 {
107 uint64_t tmp = op1;
108 op1 = (uint32_t)(op1 + op2);
109 if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
110 arith_excp(EXC_M_IOV, 0);
111 }
112 return op1;
113 }
114
115 uint64_t helper_subqv (uint64_t op1, uint64_t op2)
116 {
117 uint64_t res;
118 res = op1 - op2;
119 if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
120 arith_excp(EXC_M_IOV, 0);
121 }
122 return res;
123 }
124
125 uint64_t helper_sublv (uint64_t op1, uint64_t op2)
126 {
127 uint32_t res;
128 res = op1 - op2;
129 if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
130 arith_excp(EXC_M_IOV, 0);
131 }
132 return res;
133 }
134
135 uint64_t helper_mullv (uint64_t op1, uint64_t op2)
136 {
137 int64_t res = (int64_t)op1 * (int64_t)op2;
138
139 if (unlikely((int32_t)res != res)) {
140 arith_excp(EXC_M_IOV, 0);
141 }
142 return (int64_t)((int32_t)res);
143 }
144
145 uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
146 {
147 uint64_t tl, th;
148
149 muls64(&tl, &th, op1, op2);
150 /* If th != 0 && th != -1, then we had an overflow */
151 if (unlikely((th + 1) > 1)) {
152 arith_excp(EXC_M_IOV, 0);
153 }
154 return tl;
155 }
156
/* UMULH: high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, op1, op2);
    return hi;
}
164
/* CTPOP: count of set bits in ARG.  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}
169
/* CTLZ: count of leading zero bits in ARG.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}
174
/* CTTZ: count of trailing zero bits in ARG.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
179
/* Clear each byte of OP whose corresponding bit in MSKB is set.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return op & ~mask;
}
196
/* ZAP: clear the bytes of VAL selected by the low 8 bits of MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}
201
/* ZAPNOT: keep only the bytes of VAL selected by the low 8 bits of MASK.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
206
/* CMPBGE: set result bit I when byte I of OP1 is unsigned >= byte I
   of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t r = 0;
    int n;

    for (n = 0; n < 8; n++) {
        uint8_t a = op1 >> (n * 8);
        uint8_t b = op2 >> (n * 8);

        if (a >= b) {
            r |= 1ull << n;
        }
    }
    return r;
}
221
/* MINUB8: byte-wise unsigned minimum of two quadwords.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;

        res |= (uint64_t)(a < b ? a : b) << shift;
    }
    return res;
}
236
/* MINSB8: byte-wise signed minimum of two quadwords.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        uint8_t m = a < b ? a : b;

        res |= (uint64_t)m << shift;
    }
    return res;
}
252
/* MINUW4: word-wise (16-bit) unsigned minimum of two quadwords.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;

        res |= (uint64_t)(a < b ? a : b) << shift;
    }
    return res;
}
267
/* MINSW4: word-wise (16-bit) signed minimum of two quadwords.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        uint16_t m = a < b ? a : b;

        res |= (uint64_t)m << shift;
    }
    return res;
}
283
/* MAXUB8: byte-wise unsigned maximum of two quadwords.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;

        res |= (uint64_t)(a > b ? a : b) << shift;
    }
    return res;
}
298
/* MAXSB8: byte-wise signed maximum of two quadwords.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        uint8_t m = a > b ? a : b;

        res |= (uint64_t)m << shift;
    }
    return res;
}
314
/* MAXUW4: word-wise (16-bit) unsigned maximum of two quadwords.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;

        res |= (uint64_t)(a > b ? a : b) << shift;
    }
    return res;
}
329
/* MAXSW4: word-wise (16-bit) signed maximum of two quadwords.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        uint16_t m = a > b ? a : b;

        res |= (uint64_t)m << shift;
    }
    return res;
}
345
/* PERR: sum of absolute differences of the eight byte pairs.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;

        sum += a >= b ? (uint8_t)(a - b) : (uint8_t)(b - a);
    }
    return sum;
}
363
/* PKLB: pack the low bytes of the two longwords into the low word.  */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t lo = op1 & 0xff;
    uint64_t hi = (op1 >> 24) & 0xff00;

    return lo | hi;
}
368
/* PKWB: pack the low bytes of the four words into the low longword.  */
uint64_t helper_pkwb (uint64_t op1)
{
    uint64_t r = op1 & 0xff;

    r |= (op1 >> 8) & 0xff00;
    r |= (op1 >> 16) & 0xff0000;
    r |= (op1 >> 24) & 0xff000000;
    return r;
}
376
/* UNPKBL: spread the two low bytes into the low bytes of two longwords.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    uint64_t b0 = op1 & 0xff;
    uint64_t b1 = (op1 & 0xff00) << 24;

    return b0 | b1;
}
381
/* UNPKBW: spread the four low bytes into the low bytes of four words.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    uint64_t r = op1 & 0xff;

    r |= (op1 & 0xff00) << 8;
    r |= (op1 & 0xff0000) << 16;
    r |= (op1 & 0xff000000) << 24;
    return r;
}
389
390 /* Floating point helpers */
391
/* Set the softfloat rounding mode; VAL is a float_round_* value
   chosen by the translator.  */
void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}
396
/* Enable or disable flush-to-zero of underflowed results.  */
void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}
401
/* Clear the accumulated softfloat exception flags.  */
void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}
406
/* Read the accumulated softfloat exception flags (float_flag_*).  */
uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}
411
412 /* Raise exceptions for ieee fp insns without software completion.
413 In that case there are no exceptions that don't trap; the mask
414 doesn't apply. */
415 void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
416 {
417 if (exc) {
418 uint32_t hw_exc = 0;
419
420 if (exc & float_flag_invalid) {
421 hw_exc |= EXC_M_INV;
422 }
423 if (exc & float_flag_divbyzero) {
424 hw_exc |= EXC_M_DZE;
425 }
426 if (exc & float_flag_overflow) {
427 hw_exc |= EXC_M_FOV;
428 }
429 if (exc & float_flag_underflow) {
430 hw_exc |= EXC_M_UNF;
431 }
432 if (exc & float_flag_inexact) {
433 hw_exc |= EXC_M_INE;
434 }
435
436 arith_excp(hw_exc, 1ull << regno);
437 }
438 }
439
/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        /* All raised flags are recorded in the FPCR status bits.  */
        env->fpcr_exc_status |= exc;

        /* Only the flags not disabled by the FPCR trap mask trap.  */
        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}
452
/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        /* Zero passes through unchanged; nonzero frac is a denormal.  */
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
    return val;
}
478
479 /* Similar, but does not trap for infinities. Used for comparisons. */
480 uint64_t helper_ieee_input_cmp(uint64_t val)
481 {
482 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
483 uint64_t frac = val & 0xfffffffffffffull;
484
485 if (exp == 0) {
486 if (frac != 0) {
487 /* If DNZ is set flush denormals to zero on input. */
488 if (env->fpcr_dnz) {
489 val &= 1ull << 63;
490 } else {
491 arith_excp(EXC_M_UNF, 0);
492 }
493 }
494 } else if (exp == 0x7ff && frac) {
495 /* NaN. */
496 arith_excp(EXC_M_INV, 0);
497 }
498 return val;
499 }
500
501 /* Input remapping with software completion enabled. All we have to do
502 is handle denormal-map-to-zero; all other inputs get exceptions as
503 needed from the actual operation. */
504 uint64_t helper_ieee_input_s(uint64_t val)
505 {
506 if (env->fpcr_dnz) {
507 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
508 if (exp == 0) {
509 val &= 1ull << 63;
510 }
511 }
512 return val;
513 }
514
/* F floating (VAX) */
/* Pack an IEEE single into the 64-bit VAX F-floating register image
   (exponent re-biased by +2).  Inputs F cannot represent (NaN,
   infinity, overflowing exponents) become the VAX "dirty zero".  */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}
548
/* Unpack a VAX F-floating register image into an IEEE single.
   Reserved operands (dirty zeros) raise OPCDEC; exponents too small
   for IEEE single underflow to zero.  */
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
571
/* Convert an F-floating register image to its 32-bit memory format
   (fields are word-swapped relative to the register layout).  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t lo = (a & 0x00001fffe0000000ull) >> 13;
    uint32_t mid = (a & 0x07ffe00000000000ull) >> 45;
    uint32_t hi = (a & 0xc000000000000000ull) >> 48;

    return lo | mid | hi;
}
580
/* Convert the 32-bit F-floating memory format into the 64-bit
   register image, widening the exponent field.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r = ((uint64_t)(a & 0x0000c000)) << 48;

    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}
591
592 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
593 either implement VAX arithmetic properly or just signal invalid opcode. */
594
595 uint64_t helper_addf (uint64_t a, uint64_t b)
596 {
597 float32 fa, fb, fr;
598
599 fa = f_to_float32(a);
600 fb = f_to_float32(b);
601 fr = float32_add(fa, fb, &FP_STATUS);
602 return float32_to_f(fr);
603 }
604
605 uint64_t helper_subf (uint64_t a, uint64_t b)
606 {
607 float32 fa, fb, fr;
608
609 fa = f_to_float32(a);
610 fb = f_to_float32(b);
611 fr = float32_sub(fa, fb, &FP_STATUS);
612 return float32_to_f(fr);
613 }
614
615 uint64_t helper_mulf (uint64_t a, uint64_t b)
616 {
617 float32 fa, fb, fr;
618
619 fa = f_to_float32(a);
620 fb = f_to_float32(b);
621 fr = float32_mul(fa, fb, &FP_STATUS);
622 return float32_to_f(fr);
623 }
624
625 uint64_t helper_divf (uint64_t a, uint64_t b)
626 {
627 float32 fa, fb, fr;
628
629 fa = f_to_float32(a);
630 fb = f_to_float32(b);
631 fr = float32_div(fa, fb, &FP_STATUS);
632 return float32_to_f(fr);
633 }
634
635 uint64_t helper_sqrtf (uint64_t t)
636 {
637 float32 ft, fr;
638
639 ft = f_to_float32(t);
640 fr = float32_sqrt(ft, &FP_STATUS);
641 return float32_to_f(fr);
642 }
643
644
/* G floating (VAX) */
/* Pack an IEEE double into the VAX G-floating register image
   (exponent re-biased by +2).  Inputs G cannot represent (NaN,
   infinity, overflowing exponents) become the VAX "dirty zero".  */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}
678
/* Unpack a VAX G-floating register image into an IEEE double.
   Reserved operands (dirty zeros) raise OPCDEC; exponents too small
   to re-bias underflow to zero.  */
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
701
/* Convert a G-floating register image to memory order by swapping
   the four 16-bit words end-for-end.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t w0 = (a & 0x000000000000ffffull) << 48;
    uint64_t w1 = (a & 0x00000000ffff0000ull) << 16;
    uint64_t w2 = (a & 0x0000ffff00000000ull) >> 16;
    uint64_t w3 = (a & 0xffff000000000000ull) >> 48;

    return w0 | w1 | w2 | w3;
}
711
/* Convert G-floating memory order back to the register image; the
   transform is its own inverse (16-bit word swap).  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t w0 = (a & 0x000000000000ffffull) << 48;
    uint64_t w1 = (a & 0x00000000ffff0000ull) << 16;
    uint64_t w2 = (a & 0x0000ffff00000000ull) >> 16;
    uint64_t w3 = (a & 0xffff000000000000ull) >> 48;

    return w0 | w1 | w2 | w3;
}
721
722 uint64_t helper_addg (uint64_t a, uint64_t b)
723 {
724 float64 fa, fb, fr;
725
726 fa = g_to_float64(a);
727 fb = g_to_float64(b);
728 fr = float64_add(fa, fb, &FP_STATUS);
729 return float64_to_g(fr);
730 }
731
732 uint64_t helper_subg (uint64_t a, uint64_t b)
733 {
734 float64 fa, fb, fr;
735
736 fa = g_to_float64(a);
737 fb = g_to_float64(b);
738 fr = float64_sub(fa, fb, &FP_STATUS);
739 return float64_to_g(fr);
740 }
741
742 uint64_t helper_mulg (uint64_t a, uint64_t b)
743 {
744 float64 fa, fb, fr;
745
746 fa = g_to_float64(a);
747 fb = g_to_float64(b);
748 fr = float64_mul(fa, fb, &FP_STATUS);
749 return float64_to_g(fr);
750 }
751
752 uint64_t helper_divg (uint64_t a, uint64_t b)
753 {
754 float64 fa, fb, fr;
755
756 fa = g_to_float64(a);
757 fb = g_to_float64(b);
758 fr = float64_div(fa, fb, &FP_STATUS);
759 return float64_to_g(fr);
760 }
761
762 uint64_t helper_sqrtg (uint64_t a)
763 {
764 float64 fa, fr;
765
766 fa = g_to_float64(a);
767 fr = float64_sqrt(fa, &FP_STATUS);
768 return float64_to_g(fr);
769 }
770
771
772 /* S floating (single) */
773
/* Widen an IEEE single (memory image FI) to the S-floating register
   format, i.e. the corresponding IEEE double bit pattern.  Taken from
   linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t mant = fi & 0x7fffff;
    uint32_t sgn = fi >> 31;
    uint32_t e_hi = (fi >> 30) & 1;
    uint32_t e_lo = (fi >> 23) & 0x7f;
    uint32_t e = (e_hi << 10) | e_lo;

    /* Widen the 8-bit exponent to 11 bits, mapping the all-ones and
       all-zeros encodings to their double-precision equivalents.  */
    if (e_hi) {
        if (e_lo == 0x7f) {
            e = 0x7ff;
        }
    } else if (e_lo != 0x00) {
        e |= 0x380;
    }

    return ((uint64_t)sgn << 63)
         | ((uint64_t)e << 52)
         | ((uint64_t)mant << 29);
}
796
/* Convert a softfloat float32 to the S-floating register format.  */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}
803
/* Narrow an S-floating register image back to the 32-bit IEEE
   single memory image.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    uint32_t top = (a >> 32) & 0xc0000000;
    uint32_t rest = (a >> 29) & 0x3fffffff;

    return top | rest;
}
808
/* Convert an S-floating register image to a softfloat float32.  */
static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}
815
/* STS: narrow an S-floating register image to its memory format.  */
uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}
820
/* LDS: widen an S-floating memory image to the register format.  */
uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}
825
826 uint64_t helper_adds (uint64_t a, uint64_t b)
827 {
828 float32 fa, fb, fr;
829
830 fa = s_to_float32(a);
831 fb = s_to_float32(b);
832 fr = float32_add(fa, fb, &FP_STATUS);
833 return float32_to_s(fr);
834 }
835
836 uint64_t helper_subs (uint64_t a, uint64_t b)
837 {
838 float32 fa, fb, fr;
839
840 fa = s_to_float32(a);
841 fb = s_to_float32(b);
842 fr = float32_sub(fa, fb, &FP_STATUS);
843 return float32_to_s(fr);
844 }
845
846 uint64_t helper_muls (uint64_t a, uint64_t b)
847 {
848 float32 fa, fb, fr;
849
850 fa = s_to_float32(a);
851 fb = s_to_float32(b);
852 fr = float32_mul(fa, fb, &FP_STATUS);
853 return float32_to_s(fr);
854 }
855
856 uint64_t helper_divs (uint64_t a, uint64_t b)
857 {
858 float32 fa, fb, fr;
859
860 fa = s_to_float32(a);
861 fb = s_to_float32(b);
862 fr = float32_div(fa, fb, &FP_STATUS);
863 return float32_to_s(fr);
864 }
865
866 uint64_t helper_sqrts (uint64_t a)
867 {
868 float32 fa, fr;
869
870 fa = s_to_float32(a);
871 fr = float32_sqrt(fa, &FP_STATUS);
872 return float32_to_s(fr);
873 }
874
875
/* T floating (double) */
/* Reinterpret a T-floating register image as a softfloat float64.  */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}
884
/* Reinterpret a softfloat float64 as a T-floating register image.  */
static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
892
893 uint64_t helper_addt (uint64_t a, uint64_t b)
894 {
895 float64 fa, fb, fr;
896
897 fa = t_to_float64(a);
898 fb = t_to_float64(b);
899 fr = float64_add(fa, fb, &FP_STATUS);
900 return float64_to_t(fr);
901 }
902
903 uint64_t helper_subt (uint64_t a, uint64_t b)
904 {
905 float64 fa, fb, fr;
906
907 fa = t_to_float64(a);
908 fb = t_to_float64(b);
909 fr = float64_sub(fa, fb, &FP_STATUS);
910 return float64_to_t(fr);
911 }
912
913 uint64_t helper_mult (uint64_t a, uint64_t b)
914 {
915 float64 fa, fb, fr;
916
917 fa = t_to_float64(a);
918 fb = t_to_float64(b);
919 fr = float64_mul(fa, fb, &FP_STATUS);
920 return float64_to_t(fr);
921 }
922
923 uint64_t helper_divt (uint64_t a, uint64_t b)
924 {
925 float64 fa, fb, fr;
926
927 fa = t_to_float64(a);
928 fb = t_to_float64(b);
929 fr = float64_div(fa, fb, &FP_STATUS);
930 return float64_to_t(fr);
931 }
932
933 uint64_t helper_sqrtt (uint64_t a)
934 {
935 float64 fa, fr;
936
937 fa = t_to_float64(a);
938 fr = float64_sqrt(fa, &FP_STATUS);
939 return float64_to_t(fr);
940 }
941
942 /* Comparisons */
943 uint64_t helper_cmptun (uint64_t a, uint64_t b)
944 {
945 float64 fa, fb;
946
947 fa = t_to_float64(a);
948 fb = t_to_float64(b);
949
950 if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
951 return 0x4000000000000000ULL;
952 } else {
953 return 0;
954 }
955 }
956
957 uint64_t helper_cmpteq(uint64_t a, uint64_t b)
958 {
959 float64 fa, fb;
960
961 fa = t_to_float64(a);
962 fb = t_to_float64(b);
963
964 if (float64_eq_quiet(fa, fb, &FP_STATUS))
965 return 0x4000000000000000ULL;
966 else
967 return 0;
968 }
969
970 uint64_t helper_cmptle(uint64_t a, uint64_t b)
971 {
972 float64 fa, fb;
973
974 fa = t_to_float64(a);
975 fb = t_to_float64(b);
976
977 if (float64_le(fa, fb, &FP_STATUS))
978 return 0x4000000000000000ULL;
979 else
980 return 0;
981 }
982
983 uint64_t helper_cmptlt(uint64_t a, uint64_t b)
984 {
985 float64 fa, fb;
986
987 fa = t_to_float64(a);
988 fb = t_to_float64(b);
989
990 if (float64_lt(fa, fb, &FP_STATUS))
991 return 0x4000000000000000ULL;
992 else
993 return 0;
994 }
995
996 uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
997 {
998 float64 fa, fb;
999
1000 fa = g_to_float64(a);
1001 fb = g_to_float64(b);
1002
1003 if (float64_eq_quiet(fa, fb, &FP_STATUS))
1004 return 0x4000000000000000ULL;
1005 else
1006 return 0;
1007 }
1008
1009 uint64_t helper_cmpgle(uint64_t a, uint64_t b)
1010 {
1011 float64 fa, fb;
1012
1013 fa = g_to_float64(a);
1014 fb = g_to_float64(b);
1015
1016 if (float64_le(fa, fb, &FP_STATUS))
1017 return 0x4000000000000000ULL;
1018 else
1019 return 0;
1020 }
1021
1022 uint64_t helper_cmpglt(uint64_t a, uint64_t b)
1023 {
1024 float64 fa, fb;
1025
1026 fa = g_to_float64(a);
1027 fb = g_to_float64(b);
1028
1029 if (float64_lt(fa, fb, &FP_STATUS))
1030 return 0x4000000000000000ULL;
1031 else
1032 return 0;
1033 }
1034
1035 /* Floating point format conversion */
1036 uint64_t helper_cvtts (uint64_t a)
1037 {
1038 float64 fa;
1039 float32 fr;
1040
1041 fa = t_to_float64(a);
1042 fr = float64_to_float32(fa, &FP_STATUS);
1043 return float32_to_s(fr);
1044 }
1045
1046 uint64_t helper_cvtst (uint64_t a)
1047 {
1048 float32 fa;
1049 float64 fr;
1050
1051 fa = s_to_float32(a);
1052 fr = float32_to_float64(fa, &FP_STATUS);
1053 return float64_to_t(fr);
1054 }
1055
/* CVTQS: convert a signed quadword to S (single), current rounding.  */
uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}
1061
/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    /* Decompose the IEEE double by hand.  */
    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            /* Denormal: treat as an all-sticky rounding case.  */
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        /* NaN is invalid; infinity overflows (only reported if VI).  */
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                /* A nonzero discarded fraction means the result is
                   inexact (only reported if VI).  */
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        /* Apply the sign by two's-complement negation.  */
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}
1149
/* CVTTQ: T to quadword, dynamic rounding, exceptions enabled.  */
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}
1154
/* CVTTQ/C: chopped (truncating) conversion, exceptions suppressed.  */
uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}
1159
/* CVTTQ/SVIC: chopped conversion with overflow/inexact reporting.  */
uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}
1164
/* CVTQT: convert a signed quadword to T (double), current rounding.  */
uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}
1170
/* CVTQF: convert a signed quadword to VAX F, via IEEE single.  */
uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}
1176
1177 uint64_t helper_cvtgf (uint64_t a)
1178 {
1179 float64 fa;
1180 float32 fr;
1181
1182 fa = g_to_float64(a);
1183 fr = float64_to_float32(fa, &FP_STATUS);
1184 return float32_to_f(fr);
1185 }
1186
/* CVTGQ: convert a VAX G operand to a quadword, truncating.  */
uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}
1192
1193 uint64_t helper_cvtqg (uint64_t a)
1194 {
1195 float64 fr;
1196 fr = int64_to_float64(a, &FP_STATUS);
1197 return float64_to_g(fr);
1198 }
1199
1200 /* PALcode support special instructions */
1201 #if !defined (CONFIG_USER_ONLY)
/* HW_RET: return from PALcode to address A (low 2 bits are flags).
   Bit 0 clear means leave PAL mode, which also swaps back the shadow
   registers.  Any interrupt/lock state is discarded.  */
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->intr_flag = 0;
    env->lock_addr = -1;
    if ((a & 1) == 0) {
        env->pal_mode = 0;
        swap_shadow_regs(env);
    }
}
1212
/* TBIA: invalidate all TLB entries.  */
void helper_tbia(void)
{
    tlb_flush(env, 1);
}
1217
/* TBIS: invalidate the TLB entry for virtual address P.  */
void helper_tbis(uint64_t p)
{
    tlb_flush_page(env, p);
}
1222
/* Halt the machine: request a full system reset when RESTART is
   nonzero, otherwise an orderly shutdown.  */
void helper_halt(uint64_t restart)
{
    if (restart) {
        qemu_system_reset_request();
    } else {
        qemu_system_shutdown_request();
    }
}
1231
/* Read the wall-clock time in nanoseconds from the RTC clock source.  */
uint64_t helper_get_time(void)
{
    return qemu_get_clock_ns(rtc_clock);
}
1236
/* Arm the per-cpu alarm timer for absolute time EXPIRE, or cancel it
   when EXPIRE is zero.  */
void helper_set_alarm(uint64_t expire)
{
    if (expire) {
        env->alarm_expire = expire;
        qemu_mod_timer(env->alarm_timer, expire);
    } else {
        qemu_del_timer(env->alarm_timer);
    }
}
1246 #endif
1247
1248 /*****************************************************************************/
1249 /* Softmmu support */
1250 #if !defined (CONFIG_USER_ONLY)
/* Load a longword from physical address P, sign-extended to 64 bits.  */
uint64_t helper_ldl_phys(uint64_t p)
{
    return (int32_t)ldl_phys(p);
}
1255
/* Load a quadword from physical address P.  */
uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}
1260
/* Load-locked longword: remember the address and (sign-extended)
   value for a later store-conditional, and return the value.  */
uint64_t helper_ldl_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(p);
}
1266
1267 uint64_t helper_ldq_l_phys(uint64_t p)
1268 {
1269 env->lock_addr = p;
1270 return env->lock_value = ldl_phys(p);
1271 }
1272
/* Store the low longword of V to physical address P.  */
void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}
1277
/* Store the quadword V to physical address P.  */
void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}
1282
/* Store-conditional longword: succeed (return 1) only when P still
   matches the locked address and memory still holds the value read by
   the matching load-locked.  The lock is always cleared.  */
uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
1298
/* Store-conditional quadword: succeed (return 1) only when P still
   matches the locked address and memory still holds the value read by
   the matching load-locked.  The lock is always cleared.  */
uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(p);
        if (old == env->lock_value) {
            stq_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
1314
/* Raise EXCP_UNALIGN for an unaligned access at ADDR.  The guest PC
   is recovered first so that the faulting insn can be reread to fill
   in trap_arg1/trap_arg2 with the opcode and destination register.  */
static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
                                              int is_user, void *retaddr)
{
    uint64_t pc;
    uint32_t insn;

    do_restore_state(retaddr);

    pc = env->pc;
    insn = ldl_code(pc);

    env->trap_arg0 = addr;
    env->trap_arg1 = insn >> 26;                /* opcode */
    env->trap_arg2 = (insn >> 21) & 31;         /* dest regno */
    helper_excp(EXCP_UNALIGN, 0);
}
1331
/* Called for an access to an address with no backing RAM or device:
   record the fault details and raise a machine check.  */
void QEMU_NORETURN cpu_unassigned_access(CPUState *env1,
                                         target_phys_addr_t addr, int is_write,
                                         int is_exec, int unused, int size)
{
    env = env1;
    env->trap_arg0 = addr;
    env->trap_arg1 = is_write;
    dynamic_excp(EXCP_MCHK, 0);
}
1341
1342 #include "softmmu_exec.h"
1343
1344 #define MMUSUFFIX _mmu
1345 #define ALIGNED_ONLY
1346
1347 #define SHIFT 0
1348 #include "softmmu_template.h"
1349
1350 #define SHIFT 1
1351 #include "softmmu_template.h"
1352
1353 #define SHIFT 2
1354 #include "softmmu_template.h"
1355
1356 #define SHIFT 3
1357 #include "softmmu_template.h"
1358
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
              void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* The global ENV is swapped to ENV1 around the MMU fault handler,
       since the helpers above operate on the global.  */
    saved_env = env;
    env = env1;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        do_restore_state(retaddr);
        /* Exception index and error code are already set */
        cpu_loop_exit(env);
    }
    env = saved_env;
}
1379 #endif