cpu: Move breakpoints field from CPU_COMMON to CPUState
[qemu.git] / target-sparc / translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* Floating point registers */
62 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
63
64 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
65 static target_ulong gen_opc_jump_pc[2];
66
67 #include "exec/gen-icount.h"
68
69 typedef struct DisasContext {
70 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
71 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
72 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
73 int is_br;
74 int mem_idx;
75 int fpu_enabled;
76 int address_mask_32bit;
77 int singlestep;
78 uint32_t cc_op; /* current CC operation */
79 struct TranslationBlock *tb;
80 sparc_def_t *def;
81 TCGv_i32 t32[3];
82 TCGv ttl[5];
83 int n_t32;
84 int n_ttl;
85 } DisasContext;
86
87 typedef struct {
88 TCGCond cond;
89 bool is_bool;
90 bool g1, g2;
91 TCGv c1, c2;
92 } DisasCompare;
93
94 // This function uses non-native bit order
95 #define GET_FIELD(X, FROM, TO) \
96 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
97
98 // This function uses the order in the manuals, i.e. bit 0 is 2^0
99 #define GET_FIELD_SP(X, FROM, TO) \
100 GET_FIELD(X, 31 - (TO), 31 - (FROM))
101
102 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
103 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
104
105 #ifdef TARGET_SPARC64
106 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
107 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
108 #else
109 #define DFPREG(r) (r & 0x1e)
110 #define QFPREG(r) (r & 0x1c)
111 #endif
112
113 #define UA2005_HTRAP_MASK 0xff
114 #define V8_TRAP_MASK 0x7f
115
/*
 * Sign-extend the low LEN bits of X to a full 32-bit signed value.
 * LEN must be in [1, 32].
 *
 * The shift is done on an unsigned value: left-shifting a negative (or
 * overflowing) signed int is undefined behaviour in C, which the original
 * `(x << len) >> len` relied on.  The final conversion back to int32_t
 * gives the arithmetic (sign-propagating) right shift we need.
 */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (int32_t)((uint32_t)x << len) >> len;
}
121
122 #define IS_IMM (insn & (1<<13))
123
124 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
125 {
126 TCGv_i32 t;
127 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
128 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
129 return t;
130 }
131
132 static inline TCGv get_temp_tl(DisasContext *dc)
133 {
134 TCGv t;
135 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
136 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
137 return t;
138 }
139
/* Mark the half of the FP register file containing RD as dirty in FPRS
   (FPRS.DL for f0..f31, FPRS.DU for f32..f63).  No-op on 32-bit SPARC,
   which has no FPRS register.  */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
146
147 /* floating point registers moves */
/*
 * Return a 32-bit view of single-precision FP register SRC.  Singles are
 * packed two per i64 in cpu_fpr[]: the odd-numbered register is the low
 * half, the even-numbered one the high half.
 */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    /* 32-bit host: each i64 global is a pair of i32 globals we can alias.  */
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        /* Low half: reinterpret the i64 global as an i32 directly.  */
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        /* High half: shift it down into a fresh per-insn temporary.  */
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
171
/* Store 32-bit value V into single-precision FP register DST (packed two
   per i64 in cpu_fpr[]) and mark the owning half of the register file
   dirty in FPRS.  */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    /* Deposit V into the proper 32-bit half of the packed i64: odd
       registers live in bits 31:0, even ones in bits 63:32.  */
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
187
/* Return a scratch i32 to use as the destination of a single-precision
   FP operation; the result is committed with gen_store_fpr_F().  */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
192
193 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
194 {
195 src = DFPREG(src);
196 return cpu_fpr[src / 2];
197 }
198
199 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
200 {
201 dst = DFPREG(dst);
202 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
203 gen_update_fprs_dirty(dst);
204 }
205
/* Return the i64 global to write for double-precision destination DST;
   writing the global directly needs no separate store step.  */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
210
/* Copy quad FP register pair SRC into the env->qt0 staging area used by
   the quad-precision helper functions.  */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
218
/* Copy quad FP register pair SRC into the env->qt1 staging area (second
   operand of two-operand quad helpers).  */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
226
/* Copy the quad result left in env->qt0 by a helper back into FP register
   pair DST.  */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
234
235 #ifdef TARGET_SPARC64
/* Quad FP register move (FMOVq family): copy both i64 halves of RS into
   RD and mark RD's half of the register file dirty.  */
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
245 #endif
246
247 /* moves */
248 #ifdef CONFIG_USER_ONLY
249 #define supervisor(dc) 0
250 #ifdef TARGET_SPARC64
251 #define hypervisor(dc) 0
252 #endif
253 #else
254 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
255 #ifdef TARGET_SPARC64
256 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
257 #else
258 #endif
259 #endif
260
261 #ifdef TARGET_SPARC64
262 #ifndef TARGET_ABI32
263 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
264 #else
265 #define AM_CHECK(dc) (1)
266 #endif
267 #endif
268
269 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
270 {
271 #ifdef TARGET_SPARC64
272 if (AM_CHECK(dc))
273 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
274 #endif
275 }
276
277 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
278 {
279 if (reg == 0 || reg >= 8) {
280 TCGv t = get_temp_tl(dc);
281 if (reg == 0) {
282 tcg_gen_movi_tl(t, 0);
283 } else {
284 tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
285 }
286 return t;
287 } else {
288 return cpu_gregs[reg];
289 }
290 }
291
292 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
293 {
294 if (reg > 0) {
295 if (reg < 8) {
296 tcg_gen_mov_tl(cpu_gregs[reg], v);
297 } else {
298 tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
299 }
300 }
301 }
302
303 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
304 {
305 if (reg == 0 || reg >= 8) {
306 return get_temp_tl(dc);
307 } else {
308 return cpu_gregs[reg];
309 }
310 }
311
/*
 * Emit the end-of-TB jump to (PC, NPC).  When both stay on the same guest
 * page as this TB and we are not single-stepping, chain directly to the
 * next TB (goto_tb + exit_tb with the jump-slot index encoded in the
 * return value so the main loop can patch the link); otherwise just set
 * pc/npc and return to the main loop for a fresh lookup.
 */
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
333
// XXX suboptimal
/* Extract PSR.N (negative flag) from SRC into REG as 0 or 1.  */
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
341
/* Extract PSR.Z (zero flag) from SRC into REG as 0 or 1.  */
static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
348
/* Extract PSR.V (overflow flag) from SRC into REG as 0 or 1.  */
static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
355
/* Extract PSR.C (carry flag) from SRC into REG as 0 or 1.  */
static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
362
/* dst = src1 + immediate src2, recording both operands and the result in
   cpu_cc_* so the flags can be computed lazily later (caller sets the
   matching cc_op).  */
static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
370
/* dst = src1 + src2, recording operands and result in cpu_cc_* for lazy
   flag computation (caller sets the matching cc_op).  */
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
378
/* Recover the 32-bit carry-out of the previous add from cpu_cc_dst and
   cpu_cc_src using the identity carry = (dst < src) unsigned.  Returns a
   fresh i32 temp that the caller must free.  */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src) */
#if TARGET_LONG_BITS == 64
    /* Truncate the 64-bit cc values down to their low 32 bits first.  */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
404
/* Recover the 32-bit borrow of the previous subtract from cpu_cc_src and
   cpu_cc_src2 using the identity borrow = (src1 < src2) unsigned.
   Returns a fresh i32 temp that the caller must free.  */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2) */
#if TARGET_LONG_BITS == 64
    /* Truncate the 64-bit cc values down to their low 32 bits first.  */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
430
/*
 * ADDX/ADDXcc: dst = src1 + src2 + icc.C.  The carry-in is recovered
 * cheaply from the state left by the previous cc-setting operation
 * (dc->cc_op) instead of always calling the flag-computing helper.
 * When update_cc is set, the operands/result are recorded and the lazy
 * flag state becomes CC_OP_ADDX.
 */
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

    /* Widen the 32-bit carry to target-long width if necessary.  */
#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
501
/* dst = src1 - immediate src2 with flag recording.  A zero immediate
   degenerates to a move, so the cheaper CC_OP_LOGIC flag state is used
   in that case instead of CC_OP_SUB.  */
static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
517
/* dst = src1 - src2, recording operands and result in cpu_cc_* for lazy
   flag computation (caller sets the matching cc_op).  */
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
525
/*
 * SUBX/SUBXcc: dst = src1 - src2 - icc.C.  Mirror image of
 * gen_op_addx_int(): the borrow-in is recovered from the state left by
 * the previous cc-setting operation where possible, falling back to the
 * helper otherwise.  When update_cc is set, flags become CC_OP_SUBX.
 */
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

    /* Widen the 32-bit borrow to target-long width if necessary.  */
#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
596
/*
 * MULScc: one step of the SPARC V8 multiply-step instruction.  Shifts Y
 * right by one (inserting the low bit of src1), conditionally zeroes the
 * second operand on the old Y.lsb, shifts src1 right inserting N^V, and
 * adds — leaving operands/result in cpu_cc_* for the caller to tag as
 * CC_OP_ADD.
 */
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    /* Zero the multiplicand for this step if Y.lsb was clear.  */
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
642
/* 32x32 -> 64 multiply: low 32 bits (or full 64 on SPARC64) go to DST,
   and the high 32 bits are written to the Y register, per UMUL/SMUL
   semantics.  sign_ext selects signed vs unsigned extension.  */
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    /* 32-bit target: the double-width ops produce both halves at once.  */
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    /* Y := high 32 bits of the product.  */
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
670
/* UMUL: unsigned 32x32->64 multiply (high half to Y).  */
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
676
/* SMUL: signed 32x32->64 multiply (high half to Y).  */
static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
682
// 1: branch always
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
688
// Z: branch on equal
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}
694
// Z | (N ^ V): branch on less-or-equal (signed)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
706
// N ^ V: branch on less (signed)
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
716
// C | Z: branch on less-or-equal (unsigned)
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
726
// C: branch on carry set (unsigned less)
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}
732
// V: branch on overflow set
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}
738
// 0: branch never
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
744
// N: branch on negative
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}
750
// !Z: branch on not equal
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
757
// !(Z | (N ^ V)): branch on greater (signed) — complement of ble
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
764
// !(N ^ V): branch on greater-or-equal (signed) — complement of bl
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
771
// !(C | Z): branch on greater (unsigned) — complement of bleu
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
778
// !C: branch on carry clear (unsigned greater-or-equal)
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
785
// !N: branch on positive
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
792
// !V: branch on overflow clear
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
799
/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
/* Extract bit FCC0 of the FSR condition field at FCC_OFFSET into REG
   (0 or 1).  fcc_offset selects which of %fcc0..%fcc3 to read.  */
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
813
/* Extract bit FCC1 of the FSR condition field at FCC_OFFSET into REG
   (0 or 1).  */
static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
820
// !0: FCC0 | FCC1 — FP branch on not equal
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
831
// 1 or 2: FCC0 ^ FCC1 — FP branch on less or greater (ordered, not equal)
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
842
// 1 or 3: FCC0 — FP branch on unordered or less
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
849
// 1: FCC0 & !FCC1 — FP branch on less
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
860
// 2 or 3: FCC1 — FP branch on unordered or greater
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
867
// 2: !FCC0 & FCC1 — FP branch on greater
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}
878
// 3: FCC0 & FCC1 — FP branch on unordered
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}
889
// 0: !(FCC0 | FCC1) — FP branch on equal
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
901
// 0 or 3: !(FCC0 ^ FCC1) — FP branch on unordered or equal
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
913
// 0 or 2: !FCC0 — FP branch on greater-or-equal
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
921
// !1: !(FCC0 & !FCC1) — FP branch on unordered or greater-or-equal
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
933
// 0 or 1: !FCC1 — FP branch on less-or-equal
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
941
// !2: !(!FCC0 & FCC1) — FP branch on unordered or less-or-equal
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
953
// !3: !(FCC0 & FCC1) — FP branch on ordered
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
965
/* End the TB on a conditional branch: if R_COND is non-zero continue at
   (pc1, pc1+4), otherwise at (pc2, pc2+4).  */
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
980
/* End the TB on a conditional branch with an annulled delay slot: taken
   (R_COND non-zero) executes the delay insn at pc2 then jumps to pc1;
   untaken skips the delay slot and resumes at pc2+4.  */
static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
995
/* Resolve a pending JUMP_PC: npc = cpu_cond ? jump_pc[0] : jump_pc[1],
   done branchlessly with a movcond.  */
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
1008
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        /* Materialize the pending two-way npc so cpu_cond can be reused.  */
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
1018
/* Make cpu_npc hold the correct next-PC: resolve a pending JUMP_PC, or
   store a static npc; a npc that is already DYNAMIC_PC needs nothing.  */
static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1028
/* Force the lazily-evaluated condition codes into the architectural PSR
   if they are not already there (cc_op becomes CC_OP_FLAGS).  */
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}
1036
/* Synchronize cpu_pc/cpu_npc with the translator's view, e.g. before an
   operation that may raise an exception.  */
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1042
1043 static inline void gen_mov_pc_npc(DisasContext *dc)
1044 {
1045 if (dc->npc == JUMP_PC) {
1046 gen_generic_branch(dc);
1047 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1048 dc->pc = DYNAMIC_PC;
1049 } else if (dc->npc == DYNAMIC_PC) {
1050 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1051 dc->pc = DYNAMIC_PC;
1052 } else {
1053 dc->pc = dc->npc;
1054 }
1055 }
1056
/* Runtime fall-through: pc = npc; npc += 4 (used when npc is dynamic).  */
static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1062
/* Release the comparison operands allocated by gen_compare()/
   gen_fcompare(); operands flagged g1/g2 are TCG globals and are kept.  */
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
1072
/*
 * Fill CMP with a TCG condition and operand pair equivalent to integer
 * branch condition COND (4-bit SPARC encoding) on icc (xcc if XCC).
 * When the lazy flag state (dc->cc_op) is a plain subtract or logic op,
 * the condition is expressed directly on the saved operands, avoiding a
 * flags computation; otherwise the PSR is materialized and a boolean
 * "flags say taken" value is produced.  The caller must release the
 * operands with free_compare().
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* TCG condition for each branch cond after SUBcc; -1 entries (neg,
       overflow and their complements) cannot be expressed directly.  */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* After a logic op C = V = 0, so every condition reduces to a compare
       of the result against zero.  */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        /* Compare cc_dst against zero.  */
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* icc on a 64-bit target: only the low 32 bits count.  */
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            /* Sign of the subtract result: compare cc_dst with zero.  */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            /* Compare the original subtract operands directly.  */
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        /* Materialize the flags, then fall through to the boolean path.  */
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
1246
/* Fill in *cmp describing FPU condition COND evaluated against the
   floating-point condition-code field CC (fcc0..fcc3 in the FSR).
   The result is always materialised as a boolean temporary compared
   != 0; both c1 and c2 are locally owned temps (g1 = g2 = false).  */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    /* NOTE(review): the offsets select which fcc field the eval helpers
       extract; fcc1..fcc3 appear to live at FSR bits 32, 34, 36 relative
       to fcc0 at bit 10 (hence the "NN - 10" arithmetic) — confirm
       against gen_op_eval_fb* and the FSR layout.  */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    /* Dispatch on the 4-bit FPU condition field of the instruction.  */
    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);          /* fbn: never */
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);          /* fba: always */
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1326
1327 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1328 DisasContext *dc)
1329 {
1330 DisasCompare cmp;
1331 gen_compare(&cmp, cc, cond, dc);
1332
1333 /* The interface is to return a boolean in r_dst. */
1334 if (cmp.is_bool) {
1335 tcg_gen_mov_tl(r_dst, cmp.c1);
1336 } else {
1337 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1338 }
1339
1340 free_compare(&cmp);
1341 }
1342
1343 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1344 {
1345 DisasCompare cmp;
1346 gen_fcompare(&cmp, cc, cond);
1347
1348 /* The interface is to return a boolean in r_dst. */
1349 if (cmp.is_bool) {
1350 tcg_gen_mov_tl(r_dst, cmp.c1);
1351 } else {
1352 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1353 }
1354
1355 free_compare(&cmp);
1356 }
1357
#ifdef TARGET_SPARC64
// Inverted logic
/* Map of the V9 register-conditional (BPr/MOVr/FMOVr) 3-bit condition
   field to TCG conditions.  The stored condition is the INVERSE of the
   architectural one; gen_compare_reg() applies tcg_invert_cond() to
   recover it.  Entries 0 and 4 are reserved encodings (-1).  */
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1370
/* Fill in *cmp for a register-conditional compare of r_src against
   zero.  r_src is caller-owned (g1 = true); the zero constant is a
   local temp (g2 = false) released by free_compare().  */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    /* Undo the inversion baked into gen_tcg_cond_reg[].  */
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}
1380
1381 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1382 {
1383 DisasCompare cmp;
1384 gen_compare_reg(&cmp, cond, r_src);
1385
1386 /* The interface is to return a boolean in r_dst. */
1387 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1388
1389 free_compare(&cmp);
1390 }
1391 #endif
1392
/* Translate an integer conditional branch (Bicc / V9 BPcc).
   OFFSET is the already-scaled displacement, CC selects icc/xcc.
   Updates dc->pc / dc->npc to implement SPARC delay-slot semantics;
   the annul bit (a) suppresses the delay slot for untaken (Bicc) or
   always-taken branches.  */
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* 32-bit address masking in effect (PSTATE.AM).  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* Annulled: skip the delay slot entirely.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* Annulled: jump straight to the target.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        /* Conditional: materialise the condition in cpu_cond.  */
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            /* Annulled conditional branch ends the TB here.  */
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            /* npc becomes one of two values, resolved later via
               JUMP_PC and jump_pc[].  */
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
1441
/* Translate a floating-point conditional branch (FBfcc / V9 FBPfcc).
   Identical control-flow handling to do_branch(), but the condition is
   evaluated from the FSR fcc field CC via gen_fcond().  */
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* 32-bit address masking in effect (PSTATE.AM).  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* Annulled: skip the delay slot entirely.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* Annulled: jump straight to the target.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            /* Annulled conditional branch ends the TB here.  */
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            /* npc becomes one of two values, resolved later via
               JUMP_PC and jump_pc[].  */
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
1490
#ifdef TARGET_SPARC64
/* Translate a V9 branch-on-register (BPr): compare r_reg against zero
   per the 3-bit condition field and branch accordingly.  There are no
   "always"/"never" encodings here, so the branch is always conditional;
   delay-slot handling mirrors do_branch().  */
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    /* 32-bit address masking in effect (PSTATE.AM).  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        /* Annulled conditional branch ends the TB here.  */
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        /* npc becomes one of two values, resolved later via JUMP_PC.  */
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}
1518
1519 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1520 {
1521 switch (fccno) {
1522 case 0:
1523 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1524 break;
1525 case 1:
1526 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1527 break;
1528 case 2:
1529 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1530 break;
1531 case 3:
1532 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1533 break;
1534 }
1535 }
1536
1537 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1538 {
1539 switch (fccno) {
1540 case 0:
1541 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1542 break;
1543 case 1:
1544 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1545 break;
1546 case 2:
1547 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1548 break;
1549 case 3:
1550 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1551 break;
1552 }
1553 }
1554
1555 static inline void gen_op_fcmpq(int fccno)
1556 {
1557 switch (fccno) {
1558 case 0:
1559 gen_helper_fcmpq(cpu_env);
1560 break;
1561 case 1:
1562 gen_helper_fcmpq_fcc1(cpu_env);
1563 break;
1564 case 2:
1565 gen_helper_fcmpq_fcc2(cpu_env);
1566 break;
1567 case 3:
1568 gen_helper_fcmpq_fcc3(cpu_env);
1569 break;
1570 }
1571 }
1572
1573 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1574 {
1575 switch (fccno) {
1576 case 0:
1577 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1578 break;
1579 case 1:
1580 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1581 break;
1582 case 2:
1583 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1584 break;
1585 case 3:
1586 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1587 break;
1588 }
1589 }
1590
1591 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1592 {
1593 switch (fccno) {
1594 case 0:
1595 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1596 break;
1597 case 1:
1598 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1599 break;
1600 case 2:
1601 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1602 break;
1603 case 3:
1604 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1605 break;
1606 }
1607 }
1608
1609 static inline void gen_op_fcmpeq(int fccno)
1610 {
1611 switch (fccno) {
1612 case 0:
1613 gen_helper_fcmpeq(cpu_env);
1614 break;
1615 case 1:
1616 gen_helper_fcmpeq_fcc1(cpu_env);
1617 break;
1618 case 2:
1619 gen_helper_fcmpeq_fcc2(cpu_env);
1620 break;
1621 case 3:
1622 gen_helper_fcmpeq_fcc3(cpu_env);
1623 break;
1624 }
1625 }
1626
1627 #else
1628
/* SPARC32 has a single fcc field; fccno is ignored.  */
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}
1633
/* SPARC32 double compare; fccno is ignored.  */
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}
1638
/* SPARC32 quad compare (operands in QT0/QT1); fccno is ignored.  */
static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}
1643
/* SPARC32 FCMPEs (traps on unordered); fccno is ignored.  */
static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}
1648
/* SPARC32 FCMPEd (traps on unordered); fccno is ignored.  */
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}
1653
/* SPARC32 FCMPEq (operands in QT0/QT1, traps on unordered);
   fccno is ignored.  */
static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
1658 #endif
1659
1660 static inline void gen_op_fpexception_im(int fsr_flags)
1661 {
1662 TCGv_i32 r_const;
1663
1664 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1665 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1666 r_const = tcg_const_i32(TT_FP_EXCP);
1667 gen_helper_raise_exception(cpu_env, r_const);
1668 tcg_temp_free_i32(r_const);
1669 }
1670
1671 static int gen_trap_ifnofpu(DisasContext *dc)
1672 {
1673 #if !defined(CONFIG_USER_ONLY)
1674 if (!dc->fpu_enabled) {
1675 TCGv_i32 r_const;
1676
1677 save_state(dc);
1678 r_const = tcg_const_i32(TT_NFPU_INSN);
1679 gen_helper_raise_exception(cpu_env, r_const);
1680 tcg_temp_free_i32(r_const);
1681 dc->is_br = 1;
1682 return 1;
1683 }
1684 #endif
1685 return 0;
1686 }
1687
/* Clear both the FTT field and the current (cexc) IEEE exception bits
   in the FSR, as required before executing an FPop.  */
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1692
1693 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1694 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1695 {
1696 TCGv_i32 dst, src;
1697
1698 src = gen_load_fpr_F(dc, rs);
1699 dst = gen_dest_fpr_F(dc);
1700
1701 gen(dst, cpu_env, src);
1702
1703 gen_store_fpr_F(dc, rd, dst);
1704 }
1705
1706 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1707 void (*gen)(TCGv_i32, TCGv_i32))
1708 {
1709 TCGv_i32 dst, src;
1710
1711 src = gen_load_fpr_F(dc, rs);
1712 dst = gen_dest_fpr_F(dc);
1713
1714 gen(dst, src);
1715
1716 gen_store_fpr_F(dc, rd, dst);
1717 }
1718
1719 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1720 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1721 {
1722 TCGv_i32 dst, src1, src2;
1723
1724 src1 = gen_load_fpr_F(dc, rs1);
1725 src2 = gen_load_fpr_F(dc, rs2);
1726 dst = gen_dest_fpr_F(dc);
1727
1728 gen(dst, cpu_env, src1, src2);
1729
1730 gen_store_fpr_F(dc, rd, dst);
1731 }
1732
#ifdef TARGET_SPARC64
/* Binary single-precision VIS op with no env access:
   frd = gen(frs1, frs2).  */
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 lhs, rhs, result;

    lhs = gen_load_fpr_F(dc, rs1);
    rhs = gen_load_fpr_F(dc, rs2);
    result = gen_dest_fpr_F(dc);
    gen(result, lhs, rhs);
    gen_store_fpr_F(dc, rd, result);
}
#endif
1748
1749 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1750 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1751 {
1752 TCGv_i64 dst, src;
1753
1754 src = gen_load_fpr_D(dc, rs);
1755 dst = gen_dest_fpr_D(dc, rd);
1756
1757 gen(dst, cpu_env, src);
1758
1759 gen_store_fpr_D(dc, rd, dst);
1760 }
1761
#ifdef TARGET_SPARC64
/* Unary double-precision VIS op with no env access: frd = gen(frs).  */
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 operand, result;

    operand = gen_load_fpr_D(dc, rs);
    result = gen_dest_fpr_D(dc, rd);
    gen(result, operand);
    gen_store_fpr_D(dc, rd, result);
}
#endif
1776
1777 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1778 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1779 {
1780 TCGv_i64 dst, src1, src2;
1781
1782 src1 = gen_load_fpr_D(dc, rs1);
1783 src2 = gen_load_fpr_D(dc, rs2);
1784 dst = gen_dest_fpr_D(dc, rd);
1785
1786 gen(dst, cpu_env, src1, src2);
1787
1788 gen_store_fpr_D(dc, rd, dst);
1789 }
1790
#ifdef TARGET_SPARC64
/* Binary double-precision VIS op with no env access:
   frd = gen(frs1, frs2).  */
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 lhs, rhs, result;

    lhs = gen_load_fpr_D(dc, rs1);
    rhs = gen_load_fpr_D(dc, rs2);
    result = gen_dest_fpr_D(dc, rd);
    gen(result, lhs, rhs);
    gen_store_fpr_D(dc, rd, result);
}

/* Binary double-precision VIS op that also reads %gsr (e.g.
   FALIGNDATA): frd = gen(gsr, frs1, frs2).  */
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 lhs, rhs, result;

    lhs = gen_load_fpr_D(dc, rs1);
    rhs = gen_load_fpr_D(dc, rs2);
    result = gen_dest_fpr_D(dc, rd);
    gen(result, cpu_gsr, lhs, rhs);
    gen_store_fpr_D(dc, rd, result);
}

/* Ternary double-precision VIS op that also reads the old destination
   value: frd = gen(frd, frs1, frs2).  */
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 lhs, rhs, accum, result;

    lhs = gen_load_fpr_D(dc, rs1);
    rhs = gen_load_fpr_D(dc, rs2);
    accum = gen_load_fpr_D(dc, rd);
    result = gen_dest_fpr_D(dc, rd);
    gen(result, accum, lhs, rhs);
    gen_store_fpr_D(dc, rd, result);
}
#endif
1835
/* Unary quad-precision FPop: load frs into the QT1 global, run the
   helper (result lands in QT0), store QT0 to frd and mark the fprs
   dirty.  */
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1846
#ifdef TARGET_SPARC64
/* Unary quad-precision op via QT0/QT1.  NOTE(review): currently
   identical to gen_fop_QQ despite the "ne" (no-exception) name — the
   distinction here is only documentary.  */
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif
1859
/* Binary quad-precision FPop: operands in QT0/QT1, result in QT0.  */
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1871
1872 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1873 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1874 {
1875 TCGv_i64 dst;
1876 TCGv_i32 src1, src2;
1877
1878 src1 = gen_load_fpr_F(dc, rs1);
1879 src2 = gen_load_fpr_F(dc, rs2);
1880 dst = gen_dest_fpr_D(dc, rd);
1881
1882 gen(dst, cpu_env, src1, src2);
1883
1884 gen_store_fpr_D(dc, rd, dst);
1885 }
1886
/* Widening binary FPop: quad frd = gen(double frs1, double frs2)
   (e.g. FdMULq).  Result is produced in the QT0 global.  */
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1900
#ifdef TARGET_SPARC64
/* Widening unary FPop with env access: double frd = gen(single frs).  */
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 operand;
    TCGv_i64 result;

    operand = gen_load_fpr_F(dc, rs);
    result = gen_dest_fpr_D(dc, rd);
    gen(result, cpu_env, operand);
    gen_store_fpr_D(dc, rd, result);
}
#endif
1916
/* Widening unary op: double frd = gen(single frs).  NOTE(review):
   despite the "ne" name this variant still passes cpu_env to the
   helper, making it identical to gen_fop_DF above.  */
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1930
1931 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1932 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1933 {
1934 TCGv_i32 dst;
1935 TCGv_i64 src;
1936
1937 src = gen_load_fpr_D(dc, rs);
1938 dst = gen_dest_fpr_F(dc);
1939
1940 gen(dst, cpu_env, src);
1941
1942 gen_store_fpr_F(dc, rd, dst);
1943 }
1944
/* Narrowing unary FPop: single frd = gen(quad frs), quad operand
   passed via the QT1 global.  */
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
1957
/* Narrowing unary FPop: double frd = gen(quad frs), quad operand
   passed via the QT1 global.  */
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
1970
/* Widening unary op: quad frd = gen(single frs), result produced in
   the QT0 global.  */
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1983
/* Widening unary op: quad frd = gen(double frs), result produced in
   the QT0 global.  */
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
1996
1997 /* asi moves */
1998 #ifdef TARGET_SPARC64
1999 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
2000 {
2001 int asi;
2002 TCGv_i32 r_asi;
2003
2004 if (IS_IMM) {
2005 r_asi = tcg_temp_new_i32();
2006 tcg_gen_mov_i32(r_asi, cpu_asi);
2007 } else {
2008 asi = GET_FIELD(insn, 19, 26);
2009 r_asi = tcg_const_i32(asi);
2010 }
2011 return r_asi;
2012 }
2013
/* SPARC64: load SIZE bytes from ADDR in the instruction's ASI space
   into DST, sign-extending when SIGN is nonzero.  */
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
2027
/* SPARC64: store SIZE bytes of SRC to ADDR in the instruction's ASI
   space.  */
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
2038
/* SPARC64: FP register load from an alternate space; the helper writes
   directly into FP register RD.  */
static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
2051
/* SPARC64: FP register store to an alternate space; the helper reads
   directly from FP register RD.  */
static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
2064
/* SPARC64 SWAPA: atomically (at translation level: load-then-store)
   exchange the 32-bit word at ADDR with SRC, old value to DST.  */
static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);   /* zero-extend the loaded word */
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
2081
/* SPARC64 LDDA: 128-bit (register-pair) alternate-space load; the
   helper writes the register pair itself, so HI is unused here.  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}
2093
/* SPARC64 STDA: store the register pair {HI, rd+1} as one 64-bit
   value ({rd+1} in the low half) to the alternate space.  */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2109
/* SPARC64 CASXA: 64-bit compare-and-swap in an alternate space.
   Compares [ADDR] with r[rd]; VAL2 is the swap value; the previous
   memory value lands in r[rd].  */
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
2121
2122 #elif !defined(CONFIG_USER_ONLY)
2123
/* SPARC32 system emulation: load SIZE bytes from ADDR in the ASI
   encoded in the instruction into DST (sign-extended when SIGN != 0).
   The helper always returns 64 bits; truncate to target width.  */
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}
2140
/* SPARC32 system emulation: store SIZE bytes of SRC to ADDR in the
   instruction's ASI space (helper takes a 64-bit value).  */
static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2154
2155 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2156 {
2157 TCGv_i32 r_asi, r_size, r_sign;
2158 TCGv_i64 r_val, t64;
2159
2160 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2161 r_size = tcg_const_i32(4);
2162 r_sign = tcg_const_i32(0);
2163 t64 = tcg_temp_new_i64();
2164 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2165 tcg_temp_free(r_sign);
2166 r_val = tcg_temp_new_i64();
2167 tcg_gen_extu_tl_i64(r_val, src);
2168 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2169 tcg_temp_free_i64(r_val);
2170 tcg_temp_free_i32(r_size);
2171 tcg_temp_free_i32(r_asi);
2172 tcg_gen_trunc_i64_tl(dst, t64);
2173 tcg_temp_free_i64(t64);
2174 }
2175
/* SPARC32 system emulation LDDA: 64-bit alternate-space load split
   into the register pair — low word to r[rd+1], high word to r[rd]
   (via HI).  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    /* Low 32 bits go to the odd register of the pair.  */
    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    /* High 32 bits go to the even register.  */
    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}
2201
/* SPARC32 system emulation STDA: store the register pair {HI, rd+1}
   as one 64-bit value ({rd+1} in the low half).  */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
2217 #endif
2218
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
/* CASA: 32-bit compare-and-swap in an alternate space.  Compares
   [ADDR] with r[rd]; VAL2 is the swap value; the previous memory
   value lands in r[rd].  */
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    /* SPARC32: the ASI is always the immediate field.  */
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

/* LDSTUBA: load the byte at ADDR into DST, then store 0xff to it,
   in the ASI encoded in the instruction.  */
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
2251 #endif
2252
2253 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2254 {
2255 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2256 return gen_load_gpr(dc, rs1);
2257 }
2258
2259 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2260 {
2261 if (IS_IMM) { /* immediate */
2262 target_long simm = GET_FIELDs(insn, 19, 31);
2263 TCGv t = get_temp_tl(dc);
2264 tcg_gen_movi_tl(t, simm);
2265 return t;
2266 } else { /* register */
2267 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2268 return gen_load_gpr(dc, rs2);
2269 }
2270 }
2271
#ifdef TARGET_SPARC64
/* FMOVScc/FMOVSr: conditionally move single-precision frs to frd
   according to *cmp.  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        /* Already a 0/1 value; just narrow it.  */
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);   /* old value kept if condition false */
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
2301
2302 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2303 {
2304 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2305 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2306 gen_load_fpr_D(dc, rs),
2307 gen_load_fpr_D(dc, rd));
2308 gen_store_fpr_D(dc, rd, dst);
2309 }
2310
/* FMOVQcc/FMOVQr: conditionally move quad-precision frs to frd
   according to *cmp, as two 64-bit movconds on the register halves.  */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
2323
/* Compute a pointer to the current trap-level's trap_state entry:
   r_tsptr = &env->ts[env->tl & MAXTL_MASK].  */
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        /* Widen the i32 byte offset to pointer width before adding.  */
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
2348
/* Translate the VIS EDGE8/16/32 {,L}{,cc} instructions: compute the
   partial-store byte mask for an edge-aligned block operation of the
   given WIDTH.  When CC is set, also set the integer condition codes
   as for subcc(s1, s2).  LEFT selects the "little-endian" (L) variant.
   NOTE: s1 and s2 are masked in place (address alignment), so callers
   must pass clobberable values.  */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* EDGEcc also performs subcc(s1, s2) on the condition codes.  */
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    /* Extract the per-address edge masks from the table constants.  */
    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /* Align both addresses to 8 bytes (32 bits if PSTATE.AM).  */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
2451
/* Translate VIS ALIGNADDR{,ESS_LITTLE}: dst = (s1 + s2) & ~7, and the
   low 3 bits of the sum (negated for the LITTLE form) are deposited
   into GSR.align for use by a following FALIGNDATA.  */
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        /* LITTLE variant stores the two's complement of the offset.  */
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
2465
2466 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2467 {
2468 TCGv t1, t2, shift;
2469
2470 t1 = tcg_temp_new();
2471 t2 = tcg_temp_new();
2472 shift = tcg_temp_new();
2473
2474 tcg_gen_andi_tl(shift, gsr, 7);
2475 tcg_gen_shli_tl(shift, shift, 3);
2476 tcg_gen_shl_tl(t1, s1, shift);
2477
2478 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2479 shift of (up to 63) followed by a constant shift of 1. */
2480 tcg_gen_xori_tl(shift, shift, 63);
2481 tcg_gen_shr_tl(t2, s2, shift);
2482 tcg_gen_shri_tl(t2, t2, 1);
2483
2484 tcg_gen_or_tl(dst, t1, t2);
2485
2486 tcg_temp_free(t1);
2487 tcg_temp_free(t2);
2488 tcg_temp_free(shift);
2489 }
2490 #endif
2491
/* Bail out of instruction decode with "goto illegal_insn" unless the
   integer unit of the CPU model being translated advertises FEATURE
   in its feature mask.  Only usable inside disas_sparc_insn, which
   defines the illegal_insn label.  */
#define CHECK_IU_FEATURE(dc, FEATURE) \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
        goto illegal_insn;
/* Likewise for FPU features, but jump to the nfpu_insn handler
   instead when FEATURE is absent.  */
#define CHECK_FPU_FEATURE(dc, FEATURE) \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
        goto nfpu_insn;
2498
2499 /* before an instruction, dc->pc must be static */
2500 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2501 {
2502 unsigned int opc, rs1, rs2, rd;
2503 TCGv cpu_src1, cpu_src2;
2504 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2505 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2506 target_long simm;
2507
2508 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2509 tcg_gen_debug_insn_start(dc->pc);
2510 }
2511
2512 opc = GET_FIELD(insn, 0, 1);
2513 rd = GET_FIELD(insn, 2, 6);
2514
2515 switch (opc) {
2516 case 0: /* branches/sethi */
2517 {
2518 unsigned int xop = GET_FIELD(insn, 7, 9);
2519 int32_t target;
2520 switch (xop) {
2521 #ifdef TARGET_SPARC64
2522 case 0x1: /* V9 BPcc */
2523 {
2524 int cc;
2525
2526 target = GET_FIELD_SP(insn, 0, 18);
2527 target = sign_extend(target, 19);
2528 target <<= 2;
2529 cc = GET_FIELD_SP(insn, 20, 21);
2530 if (cc == 0)
2531 do_branch(dc, target, insn, 0);
2532 else if (cc == 2)
2533 do_branch(dc, target, insn, 1);
2534 else
2535 goto illegal_insn;
2536 goto jmp_insn;
2537 }
2538 case 0x3: /* V9 BPr */
2539 {
2540 target = GET_FIELD_SP(insn, 0, 13) |
2541 (GET_FIELD_SP(insn, 20, 21) << 14);
2542 target = sign_extend(target, 16);
2543 target <<= 2;
2544 cpu_src1 = get_src1(dc, insn);
2545 do_branch_reg(dc, target, insn, cpu_src1);
2546 goto jmp_insn;
2547 }
2548 case 0x5: /* V9 FBPcc */
2549 {
2550 int cc = GET_FIELD_SP(insn, 20, 21);
2551 if (gen_trap_ifnofpu(dc)) {
2552 goto jmp_insn;
2553 }
2554 target = GET_FIELD_SP(insn, 0, 18);
2555 target = sign_extend(target, 19);
2556 target <<= 2;
2557 do_fbranch(dc, target, insn, cc);
2558 goto jmp_insn;
2559 }
2560 #else
2561 case 0x7: /* CBN+x */
2562 {
2563 goto ncp_insn;
2564 }
2565 #endif
2566 case 0x2: /* BN+x */
2567 {
2568 target = GET_FIELD(insn, 10, 31);
2569 target = sign_extend(target, 22);
2570 target <<= 2;
2571 do_branch(dc, target, insn, 0);
2572 goto jmp_insn;
2573 }
2574 case 0x6: /* FBN+x */
2575 {
2576 if (gen_trap_ifnofpu(dc)) {
2577 goto jmp_insn;
2578 }
2579 target = GET_FIELD(insn, 10, 31);
2580 target = sign_extend(target, 22);
2581 target <<= 2;
2582 do_fbranch(dc, target, insn, 0);
2583 goto jmp_insn;
2584 }
2585 case 0x4: /* SETHI */
2586 /* Special-case %g0 because that's the canonical nop. */
2587 if (rd) {
2588 uint32_t value = GET_FIELD(insn, 10, 31);
2589 TCGv t = gen_dest_gpr(dc, rd);
2590 tcg_gen_movi_tl(t, value << 10);
2591 gen_store_gpr(dc, rd, t);
2592 }
2593 break;
2594 case 0x0: /* UNIMPL */
2595 default:
2596 goto illegal_insn;
2597 }
2598 break;
2599 }
2600 break;
2601 case 1: /*CALL*/
2602 {
2603 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2604 TCGv o7 = gen_dest_gpr(dc, 15);
2605
2606 tcg_gen_movi_tl(o7, dc->pc);
2607 gen_store_gpr(dc, 15, o7);
2608 target += dc->pc;
2609 gen_mov_pc_npc(dc);
2610 #ifdef TARGET_SPARC64
2611 if (unlikely(AM_CHECK(dc))) {
2612 target &= 0xffffffffULL;
2613 }
2614 #endif
2615 dc->npc = target;
2616 }
2617 goto jmp_insn;
2618 case 2: /* FPU & Logical Operations */
2619 {
2620 unsigned int xop = GET_FIELD(insn, 7, 12);
2621 TCGv cpu_dst = get_temp_tl(dc);
2622 TCGv cpu_tmp0;
2623
2624 if (xop == 0x3a) { /* generate trap */
2625 int cond = GET_FIELD(insn, 3, 6);
2626 TCGv_i32 trap;
2627 int l1 = -1, mask;
2628
2629 if (cond == 0) {
2630 /* Trap never. */
2631 break;
2632 }
2633
2634 save_state(dc);
2635
2636 if (cond != 8) {
2637 /* Conditional trap. */
2638 DisasCompare cmp;
2639 #ifdef TARGET_SPARC64
2640 /* V9 icc/xcc */
2641 int cc = GET_FIELD_SP(insn, 11, 12);
2642 if (cc == 0) {
2643 gen_compare(&cmp, 0, cond, dc);
2644 } else if (cc == 2) {
2645 gen_compare(&cmp, 1, cond, dc);
2646 } else {
2647 goto illegal_insn;
2648 }
2649 #else
2650 gen_compare(&cmp, 0, cond, dc);
2651 #endif
2652 l1 = gen_new_label();
2653 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2654 cmp.c1, cmp.c2, l1);
2655 free_compare(&cmp);
2656 }
2657
2658 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2659 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2660
2661 /* Don't use the normal temporaries, as they may well have
2662 gone out of scope with the branch above. While we're
2663 doing that we might as well pre-truncate to 32-bit. */
2664 trap = tcg_temp_new_i32();
2665
2666 rs1 = GET_FIELD_SP(insn, 14, 18);
2667 if (IS_IMM) {
2668 rs2 = GET_FIELD_SP(insn, 0, 6);
2669 if (rs1 == 0) {
2670 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2671 /* Signal that the trap value is fully constant. */
2672 mask = 0;
2673 } else {
2674 TCGv t1 = gen_load_gpr(dc, rs1);
2675 tcg_gen_trunc_tl_i32(trap, t1);
2676 tcg_gen_addi_i32(trap, trap, rs2);
2677 }
2678 } else {
2679 TCGv t1, t2;
2680 rs2 = GET_FIELD_SP(insn, 0, 4);
2681 t1 = gen_load_gpr(dc, rs1);
2682 t2 = gen_load_gpr(dc, rs2);
2683 tcg_gen_add_tl(t1, t1, t2);
2684 tcg_gen_trunc_tl_i32(trap, t1);
2685 }
2686 if (mask != 0) {
2687 tcg_gen_andi_i32(trap, trap, mask);
2688 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2689 }
2690
2691 gen_helper_raise_exception(cpu_env, trap);
2692 tcg_temp_free_i32(trap);
2693
2694 if (cond == 8) {
2695 /* An unconditional trap ends the TB. */
2696 dc->is_br = 1;
2697 goto jmp_insn;
2698 } else {
2699 /* A conditional trap falls through to the next insn. */
2700 gen_set_label(l1);
2701 break;
2702 }
2703 } else if (xop == 0x28) {
2704 rs1 = GET_FIELD(insn, 13, 17);
2705 switch(rs1) {
2706 case 0: /* rdy */
2707 #ifndef TARGET_SPARC64
2708 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2709 manual, rdy on the microSPARC
2710 II */
2711 case 0x0f: /* stbar in the SPARCv8 manual,
2712 rdy on the microSPARC II */
2713 case 0x10 ... 0x1f: /* implementation-dependent in the
2714 SPARCv8 manual, rdy on the
2715 microSPARC II */
2716 /* Read Asr17 */
2717 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2718 TCGv t = gen_dest_gpr(dc, rd);
2719 /* Read Asr17 for a Leon3 monoprocessor */
2720 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2721 gen_store_gpr(dc, rd, t);
2722 break;
2723 }
2724 #endif
2725 gen_store_gpr(dc, rd, cpu_y);
2726 break;
2727 #ifdef TARGET_SPARC64
2728 case 0x2: /* V9 rdccr */
2729 update_psr(dc);
2730 gen_helper_rdccr(cpu_dst, cpu_env);
2731 gen_store_gpr(dc, rd, cpu_dst);
2732 break;
2733 case 0x3: /* V9 rdasi */
2734 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2735 gen_store_gpr(dc, rd, cpu_dst);
2736 break;
2737 case 0x4: /* V9 rdtick */
2738 {
2739 TCGv_ptr r_tickptr;
2740
2741 r_tickptr = tcg_temp_new_ptr();
2742 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2743 offsetof(CPUSPARCState, tick));
2744 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2745 tcg_temp_free_ptr(r_tickptr);
2746 gen_store_gpr(dc, rd, cpu_dst);
2747 }
2748 break;
2749 case 0x5: /* V9 rdpc */
2750 {
2751 TCGv t = gen_dest_gpr(dc, rd);
2752 if (unlikely(AM_CHECK(dc))) {
2753 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2754 } else {
2755 tcg_gen_movi_tl(t, dc->pc);
2756 }
2757 gen_store_gpr(dc, rd, t);
2758 }
2759 break;
2760 case 0x6: /* V9 rdfprs */
2761 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2762 gen_store_gpr(dc, rd, cpu_dst);
2763 break;
2764 case 0xf: /* V9 membar */
2765 break; /* no effect */
2766 case 0x13: /* Graphics Status */
2767 if (gen_trap_ifnofpu(dc)) {
2768 goto jmp_insn;
2769 }
2770 gen_store_gpr(dc, rd, cpu_gsr);
2771 break;
2772 case 0x16: /* Softint */
2773 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2774 gen_store_gpr(dc, rd, cpu_dst);
2775 break;
2776 case 0x17: /* Tick compare */
2777 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2778 break;
2779 case 0x18: /* System tick */
2780 {
2781 TCGv_ptr r_tickptr;
2782
2783 r_tickptr = tcg_temp_new_ptr();
2784 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2785 offsetof(CPUSPARCState, stick));
2786 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2787 tcg_temp_free_ptr(r_tickptr);
2788 gen_store_gpr(dc, rd, cpu_dst);
2789 }
2790 break;
2791 case 0x19: /* System tick compare */
2792 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2793 break;
2794 case 0x10: /* Performance Control */
2795 case 0x11: /* Performance Instrumentation Counter */
2796 case 0x12: /* Dispatch Control */
2797 case 0x14: /* Softint set, WO */
2798 case 0x15: /* Softint clear, WO */
2799 #endif
2800 default:
2801 goto illegal_insn;
2802 }
2803 #if !defined(CONFIG_USER_ONLY)
2804 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2805 #ifndef TARGET_SPARC64
2806 if (!supervisor(dc)) {
2807 goto priv_insn;
2808 }
2809 update_psr(dc);
2810 gen_helper_rdpsr(cpu_dst, cpu_env);
2811 #else
2812 CHECK_IU_FEATURE(dc, HYPV);
2813 if (!hypervisor(dc))
2814 goto priv_insn;
2815 rs1 = GET_FIELD(insn, 13, 17);
2816 switch (rs1) {
2817 case 0: // hpstate
2818 // gen_op_rdhpstate();
2819 break;
2820 case 1: // htstate
2821 // gen_op_rdhtstate();
2822 break;
2823 case 3: // hintp
2824 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2825 break;
2826 case 5: // htba
2827 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2828 break;
2829 case 6: // hver
2830 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2831 break;
2832 case 31: // hstick_cmpr
2833 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2834 break;
2835 default:
2836 goto illegal_insn;
2837 }
2838 #endif
2839 gen_store_gpr(dc, rd, cpu_dst);
2840 break;
2841 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2842 if (!supervisor(dc)) {
2843 goto priv_insn;
2844 }
2845 cpu_tmp0 = get_temp_tl(dc);
2846 #ifdef TARGET_SPARC64
2847 rs1 = GET_FIELD(insn, 13, 17);
2848 switch (rs1) {
2849 case 0: // tpc
2850 {
2851 TCGv_ptr r_tsptr;
2852
2853 r_tsptr = tcg_temp_new_ptr();
2854 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2855 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2856 offsetof(trap_state, tpc));
2857 tcg_temp_free_ptr(r_tsptr);
2858 }
2859 break;
2860 case 1: // tnpc
2861 {
2862 TCGv_ptr r_tsptr;
2863
2864 r_tsptr = tcg_temp_new_ptr();
2865 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2866 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2867 offsetof(trap_state, tnpc));
2868 tcg_temp_free_ptr(r_tsptr);
2869 }
2870 break;
2871 case 2: // tstate
2872 {
2873 TCGv_ptr r_tsptr;
2874
2875 r_tsptr = tcg_temp_new_ptr();
2876 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2877 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2878 offsetof(trap_state, tstate));
2879 tcg_temp_free_ptr(r_tsptr);
2880 }
2881 break;
2882 case 3: // tt
2883 {
2884 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2885
2886 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2887 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2888 offsetof(trap_state, tt));
2889 tcg_temp_free_ptr(r_tsptr);
2890 }
2891 break;
2892 case 4: // tick
2893 {
2894 TCGv_ptr r_tickptr;
2895
2896 r_tickptr = tcg_temp_new_ptr();
2897 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2898 offsetof(CPUSPARCState, tick));
2899 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2900 tcg_temp_free_ptr(r_tickptr);
2901 }
2902 break;
2903 case 5: // tba
2904 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2905 break;
2906 case 6: // pstate
2907 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2908 offsetof(CPUSPARCState, pstate));
2909 break;
2910 case 7: // tl
2911 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2912 offsetof(CPUSPARCState, tl));
2913 break;
2914 case 8: // pil
2915 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2916 offsetof(CPUSPARCState, psrpil));
2917 break;
2918 case 9: // cwp
2919 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2920 break;
2921 case 10: // cansave
2922 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2923 offsetof(CPUSPARCState, cansave));
2924 break;
2925 case 11: // canrestore
2926 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2927 offsetof(CPUSPARCState, canrestore));
2928 break;
2929 case 12: // cleanwin
2930 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2931 offsetof(CPUSPARCState, cleanwin));
2932 break;
2933 case 13: // otherwin
2934 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2935 offsetof(CPUSPARCState, otherwin));
2936 break;
2937 case 14: // wstate
2938 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2939 offsetof(CPUSPARCState, wstate));
2940 break;
2941 case 16: // UA2005 gl
2942 CHECK_IU_FEATURE(dc, GL);
2943 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2944 offsetof(CPUSPARCState, gl));
2945 break;
2946 case 26: // UA2005 strand status
2947 CHECK_IU_FEATURE(dc, HYPV);
2948 if (!hypervisor(dc))
2949 goto priv_insn;
2950 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2951 break;
2952 case 31: // ver
2953 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2954 break;
2955 case 15: // fq
2956 default:
2957 goto illegal_insn;
2958 }
2959 #else
2960 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2961 #endif
2962 gen_store_gpr(dc, rd, cpu_tmp0);
2963 break;
2964 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2965 #ifdef TARGET_SPARC64
2966 save_state(dc);
2967 gen_helper_flushw(cpu_env);
2968 #else
2969 if (!supervisor(dc))
2970 goto priv_insn;
2971 gen_store_gpr(dc, rd, cpu_tbr);
2972 #endif
2973 break;
2974 #endif
2975 } else if (xop == 0x34) { /* FPU Operations */
2976 if (gen_trap_ifnofpu(dc)) {
2977 goto jmp_insn;
2978 }
2979 gen_op_clear_ieee_excp_and_FTT();
2980 rs1 = GET_FIELD(insn, 13, 17);
2981 rs2 = GET_FIELD(insn, 27, 31);
2982 xop = GET_FIELD(insn, 18, 26);
2983 save_state(dc);
2984 switch (xop) {
2985 case 0x1: /* fmovs */
2986 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2987 gen_store_fpr_F(dc, rd, cpu_src1_32);
2988 break;
2989 case 0x5: /* fnegs */
2990 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2991 break;
2992 case 0x9: /* fabss */
2993 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2994 break;
2995 case 0x29: /* fsqrts */
2996 CHECK_FPU_FEATURE(dc, FSQRT);
2997 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2998 break;
2999 case 0x2a: /* fsqrtd */
3000 CHECK_FPU_FEATURE(dc, FSQRT);
3001 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3002 break;
3003 case 0x2b: /* fsqrtq */
3004 CHECK_FPU_FEATURE(dc, FLOAT128);
3005 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3006 break;
3007 case 0x41: /* fadds */
3008 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3009 break;
3010 case 0x42: /* faddd */
3011 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3012 break;
3013 case 0x43: /* faddq */
3014 CHECK_FPU_FEATURE(dc, FLOAT128);
3015 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3016 break;
3017 case 0x45: /* fsubs */
3018 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3019 break;
3020 case 0x46: /* fsubd */
3021 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3022 break;
3023 case 0x47: /* fsubq */
3024 CHECK_FPU_FEATURE(dc, FLOAT128);
3025 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3026 break;
3027 case 0x49: /* fmuls */
3028 CHECK_FPU_FEATURE(dc, FMUL);
3029 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3030 break;
3031 case 0x4a: /* fmuld */
3032 CHECK_FPU_FEATURE(dc, FMUL);
3033 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3034 break;
3035 case 0x4b: /* fmulq */
3036 CHECK_FPU_FEATURE(dc, FLOAT128);
3037 CHECK_FPU_FEATURE(dc, FMUL);
3038 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3039 break;
3040 case 0x4d: /* fdivs */
3041 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3042 break;
3043 case 0x4e: /* fdivd */
3044 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3045 break;
3046 case 0x4f: /* fdivq */
3047 CHECK_FPU_FEATURE(dc, FLOAT128);
3048 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3049 break;
3050 case 0x69: /* fsmuld */
3051 CHECK_FPU_FEATURE(dc, FSMULD);
3052 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3053 break;
3054 case 0x6e: /* fdmulq */
3055 CHECK_FPU_FEATURE(dc, FLOAT128);
3056 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3057 break;
3058 case 0xc4: /* fitos */
3059 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3060 break;
3061 case 0xc6: /* fdtos */
3062 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3063 break;
3064 case 0xc7: /* fqtos */
3065 CHECK_FPU_FEATURE(dc, FLOAT128);
3066 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3067 break;
3068 case 0xc8: /* fitod */
3069 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3070 break;
3071 case 0xc9: /* fstod */
3072 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3073 break;
3074 case 0xcb: /* fqtod */
3075 CHECK_FPU_FEATURE(dc, FLOAT128);
3076 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3077 break;
3078 case 0xcc: /* fitoq */
3079 CHECK_FPU_FEATURE(dc, FLOAT128);
3080 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3081 break;
3082 case 0xcd: /* fstoq */
3083 CHECK_FPU_FEATURE(dc, FLOAT128);
3084 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3085 break;
3086 case 0xce: /* fdtoq */
3087 CHECK_FPU_FEATURE(dc, FLOAT128);
3088 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3089 break;
3090 case 0xd1: /* fstoi */
3091 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3092 break;
3093 case 0xd2: /* fdtoi */
3094 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3095 break;
3096 case 0xd3: /* fqtoi */
3097 CHECK_FPU_FEATURE(dc, FLOAT128);
3098 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3099 break;
3100 #ifdef TARGET_SPARC64
3101 case 0x2: /* V9 fmovd */
3102 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3103 gen_store_fpr_D(dc, rd, cpu_src1_64);
3104 break;
3105 case 0x3: /* V9 fmovq */
3106 CHECK_FPU_FEATURE(dc, FLOAT128);
3107 gen_move_Q(rd, rs2);
3108 break;
3109 case 0x6: /* V9 fnegd */
3110 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3111 break;
3112 case 0x7: /* V9 fnegq */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3115 break;
3116 case 0xa: /* V9 fabsd */
3117 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3118 break;
3119 case 0xb: /* V9 fabsq */
3120 CHECK_FPU_FEATURE(dc, FLOAT128);
3121 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3122 break;
3123 case 0x81: /* V9 fstox */
3124 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3125 break;
3126 case 0x82: /* V9 fdtox */
3127 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3128 break;
3129 case 0x83: /* V9 fqtox */
3130 CHECK_FPU_FEATURE(dc, FLOAT128);
3131 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3132 break;
3133 case 0x84: /* V9 fxtos */
3134 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3135 break;
3136 case 0x88: /* V9 fxtod */
3137 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3138 break;
3139 case 0x8c: /* V9 fxtoq */
3140 CHECK_FPU_FEATURE(dc, FLOAT128);
3141 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3142 break;
3143 #endif
3144 default:
3145 goto illegal_insn;
3146 }
3147 } else if (xop == 0x35) { /* FPU Operations */
3148 #ifdef TARGET_SPARC64
3149 int cond;
3150 #endif
3151 if (gen_trap_ifnofpu(dc)) {
3152 goto jmp_insn;
3153 }
3154 gen_op_clear_ieee_excp_and_FTT();
3155 rs1 = GET_FIELD(insn, 13, 17);
3156 rs2 = GET_FIELD(insn, 27, 31);
3157 xop = GET_FIELD(insn, 18, 26);
3158 save_state(dc);
3159
3160 #ifdef TARGET_SPARC64
3161 #define FMOVR(sz) \
3162 do { \
3163 DisasCompare cmp; \
3164 cond = GET_FIELD_SP(insn, 10, 12); \
3165 cpu_src1 = get_src1(dc, insn); \
3166 gen_compare_reg(&cmp, cond, cpu_src1); \
3167 gen_fmov##sz(dc, &cmp, rd, rs2); \
3168 free_compare(&cmp); \
3169 } while (0)
3170
3171 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3172 FMOVR(s);
3173 break;
3174 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3175 FMOVR(d);
3176 break;
3177 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3178 CHECK_FPU_FEATURE(dc, FLOAT128);
3179 FMOVR(q);
3180 break;
3181 }
3182 #undef FMOVR
3183 #endif
3184 switch (xop) {
3185 #ifdef TARGET_SPARC64
3186 #define FMOVCC(fcc, sz) \
3187 do { \
3188 DisasCompare cmp; \
3189 cond = GET_FIELD_SP(insn, 14, 17); \
3190 gen_fcompare(&cmp, fcc, cond); \
3191 gen_fmov##sz(dc, &cmp, rd, rs2); \
3192 free_compare(&cmp); \
3193 } while (0)
3194
3195 case 0x001: /* V9 fmovscc %fcc0 */
3196 FMOVCC(0, s);
3197 break;
3198 case 0x002: /* V9 fmovdcc %fcc0 */
3199 FMOVCC(0, d);
3200 break;
3201 case 0x003: /* V9 fmovqcc %fcc0 */
3202 CHECK_FPU_FEATURE(dc, FLOAT128);
3203 FMOVCC(0, q);
3204 break;
3205 case 0x041: /* V9 fmovscc %fcc1 */
3206 FMOVCC(1, s);
3207 break;
3208 case 0x042: /* V9 fmovdcc %fcc1 */
3209 FMOVCC(1, d);
3210 break;
3211 case 0x043: /* V9 fmovqcc %fcc1 */
3212 CHECK_FPU_FEATURE(dc, FLOAT128);
3213 FMOVCC(1, q);
3214 break;
3215 case 0x081: /* V9 fmovscc %fcc2 */
3216 FMOVCC(2, s);
3217 break;
3218 case 0x082: /* V9 fmovdcc %fcc2 */
3219 FMOVCC(2, d);
3220 break;
3221 case 0x083: /* V9 fmovqcc %fcc2 */
3222 CHECK_FPU_FEATURE(dc, FLOAT128);
3223 FMOVCC(2, q);
3224 break;
3225 case 0x0c1: /* V9 fmovscc %fcc3 */
3226 FMOVCC(3, s);
3227 break;
3228 case 0x0c2: /* V9 fmovdcc %fcc3 */
3229 FMOVCC(3, d);
3230 break;
3231 case 0x0c3: /* V9 fmovqcc %fcc3 */
3232 CHECK_FPU_FEATURE(dc, FLOAT128);
3233 FMOVCC(3, q);
3234 break;
3235 #undef FMOVCC
3236 #define FMOVCC(xcc, sz) \
3237 do { \
3238 DisasCompare cmp; \
3239 cond = GET_FIELD_SP(insn, 14, 17); \