[qemu.git] / target-unicore32 / translate.c
1 /*
2 * UniCore32 translation
3 *
4 * Copyright (C) 2010-2011 GUAN Xue-tao
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <stdarg.h>
11 #include <stdlib.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <inttypes.h>
15
16 #include "cpu.h"
17 #include "disas.h"
18 #include "tcg-op.h"
19 #include "qemu-log.h"
20
21 #include "helper.h"
22 #define GEN_HELPER 1
23 #include "helper.h"
24
25 /* internal defines */
26 typedef struct DisasContext {
27 target_ulong pc;
28 int is_jmp;
29 /* Nonzero if this instruction has been conditionally skipped. */
30 int condjmp;
31 /* The label that will be jumped to when the instruction is skipped. */
32 int condlabel;
33 struct TranslationBlock *tb;
34 int singlestep_enabled;
35 } DisasContext;
36
37 #define IS_USER(s) 1
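/* Note: IS_USER() is hard-wired to 1 here, so every generated load/store
 * uses the user-mode MMU index and the privileged-only paths below (for
 * example the BSR write in gen_set_psr()) are refused; presumably only
 * user-mode emulation is exercised through this translator at this point. */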
38
39 /* These instructions trap after executing, so defer them until after the
40    conditional execution state has been updated. */
41 #define DISAS_SYSCALL 5
42
43 static TCGv_ptr cpu_env;
44 static TCGv_i32 cpu_R[32];
45
46 /* FIXME: These should be removed. */
47 static TCGv cpu_F0s, cpu_F1s;
48 static TCGv_i64 cpu_F0d, cpu_F1d;
49
50 #include "gen-icount.h"
51
52 static const char *regnames[] = {
53 "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
54 "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
55 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
56 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
57
58 /* initialize TCG globals. */
59 void uc32_translate_init(void)
60 {
61 int i;
62
63 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
64
65 for (i = 0; i < 32; i++) {
66 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
67 offsetof(CPUState, regs[i]), regnames[i]);
68 }
69
70 #define GEN_HELPER 2
71 #include "helper.h"
72 }
73
74 static int num_temps;
75
76 /* Allocate a temporary variable. */
77 static TCGv_i32 new_tmp(void)
78 {
79 num_temps++;
80 return tcg_temp_new_i32();
81 }
82
83 /* Release a temporary variable. */
84 static void dead_tmp(TCGv tmp)
85 {
86 tcg_temp_free(tmp);
87 num_temps--;
88 }
89
90 static inline TCGv load_cpu_offset(int offset)
91 {
92 TCGv tmp = new_tmp();
93 tcg_gen_ld_i32(tmp, cpu_env, offset);
94 return tmp;
95 }
96
97 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
98
99 static inline void store_cpu_offset(TCGv var, int offset)
100 {
101 tcg_gen_st_i32(var, cpu_env, offset);
102 dead_tmp(var);
103 }
104
105 #define store_cpu_field(var, name) \
106 store_cpu_offset(var, offsetof(CPUState, name))
107
108 /* Set a variable to the value of a CPU register. */
109 static void load_reg_var(DisasContext *s, TCGv var, int reg)
110 {
111 if (reg == 31) {
112 uint32_t addr;
113 /* normally, since we have already updated the PC */
114 addr = (long)s->pc;
115 tcg_gen_movi_i32(var, addr);
116 } else {
117 tcg_gen_mov_i32(var, cpu_R[reg]);
118 }
119 }
120
121 /* Create a new temporary and set it to the value of a CPU register. */
122 static inline TCGv load_reg(DisasContext *s, int reg)
123 {
124 TCGv tmp = new_tmp();
125 load_reg_var(s, tmp, reg);
126 return tmp;
127 }
128
129 /* Set a CPU register. The source must be a temporary and will be
130 marked as dead. */
131 static void store_reg(DisasContext *s, int reg, TCGv var)
132 {
133 if (reg == 31) {
134 tcg_gen_andi_i32(var, var, ~3);
135 s->is_jmp = DISAS_JUMP;
136 }
137 tcg_gen_mov_i32(cpu_R[reg], var);
138 dead_tmp(var);
139 }
140
141 /* Value extensions. */
142 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
143 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
144 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
145 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
146
147 #define UCOP_REG_M (((insn) >> 0) & 0x1f)
148 #define UCOP_REG_N (((insn) >> 19) & 0x1f)
149 #define UCOP_REG_D (((insn) >> 14) & 0x1f)
150 #define UCOP_REG_S (((insn) >> 9) & 0x1f)
151 #define UCOP_REG_LO (((insn) >> 14) & 0x1f)
152 #define UCOP_REG_HI (((insn) >> 9) & 0x1f)
153 #define UCOP_SH_OP (((insn) >> 6) & 0x03)
154 #define UCOP_SH_IM (((insn) >> 9) & 0x1f)
155 #define UCOP_OPCODES (((insn) >> 25) & 0x0f)
156 #define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
157 #define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
158 #define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
159 #define UCOP_COND (((insn) >> 25) & 0x0f)
160 #define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
161 #define UCOP_CPNUM (((insn) >> 10) & 0x0f)
162 #define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
163 #define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
164 #define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)
165
166 #define UCOP_SET(i) ((insn) & (1 << (i)))
167 #define UCOP_SET_P UCOP_SET(28)
168 #define UCOP_SET_U UCOP_SET(27)
169 #define UCOP_SET_B UCOP_SET(26)
170 #define UCOP_SET_W UCOP_SET(25)
171 #define UCOP_SET_L UCOP_SET(24)
172 #define UCOP_SET_S UCOP_SET(24)
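/* Decoding example, derived from the field macros above: in a
 * data-processing word UCOP_OPCODES occupies bits [28:25], UCOP_REG_N
 * bits [23:19], UCOP_REG_D bits [18:14], UCOP_REG_S bits [13:9] and
 * UCOP_REG_M bits [4:0]; the UCOP_SET_x tests simply probe the named
 * single bit of the raw instruction word. */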
173
174 #define ILLEGAL cpu_abort(env, \
175 "Illegal UniCore32 instruction %x at line %d!", \
176 insn, __LINE__)
177
178 static inline void gen_set_asr(TCGv var, uint32_t mask)
179 {
180 TCGv tmp_mask = tcg_const_i32(mask);
181 gen_helper_asr_write(var, tmp_mask);
182 tcg_temp_free_i32(tmp_mask);
183 }
184 /* Set NZCV flags from the high 4 bits of var. */
185 #define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
186
187 static void gen_exception(int excp)
188 {
189 TCGv tmp = new_tmp();
190 tcg_gen_movi_i32(tmp, excp);
191 gen_helper_exception(tmp);
192 dead_tmp(tmp);
193 }
194
195 /* FIXME: Most targets have native widening multiplication.
196 It would be good to use that instead of a full wide multiply. */
197 /* 32x32->64 multiply. Marks inputs as dead. */
198 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
199 {
200 TCGv_i64 tmp1 = tcg_temp_new_i64();
201 TCGv_i64 tmp2 = tcg_temp_new_i64();
202
203 tcg_gen_extu_i32_i64(tmp1, a);
204 dead_tmp(a);
205 tcg_gen_extu_i32_i64(tmp2, b);
206 dead_tmp(b);
207 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
208 tcg_temp_free_i64(tmp2);
209 return tmp1;
210 }
211
212 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
213 {
214 TCGv_i64 tmp1 = tcg_temp_new_i64();
215 TCGv_i64 tmp2 = tcg_temp_new_i64();
216
217 tcg_gen_ext_i32_i64(tmp1, a);
218 dead_tmp(a);
219 tcg_gen_ext_i32_i64(tmp2, b);
220 dead_tmp(b);
221 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
222 tcg_temp_free_i64(tmp2);
223 return tmp1;
224 }
225
226 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
227
228 /* Set CF to the top bit of var. */
229 static void gen_set_CF_bit31(TCGv var)
230 {
231 TCGv tmp = new_tmp();
232 tcg_gen_shri_i32(tmp, var, 31);
233 gen_set_CF(tmp);
234 dead_tmp(tmp);
235 }
236
237 /* Set N and Z flags from var. */
238 static inline void gen_logic_CC(TCGv var)
239 {
240 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
241 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
242 }
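/* Flag storage convention used throughout this file: ZF holds the full
 * result value (Z is set when ZF == 0), NF holds a value whose sign bit
 * is N, CF holds 0 or 1, and VF holds a value whose sign bit is V. This
 * is why gen_logic_CC() above simply stores the result into both NF and
 * ZF, and why gen_test_cc() below tests these fields with zero/sign
 * comparisons. */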
243
244 /* dest = T0 + T1 + CF. */
245 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
246 {
247 TCGv tmp;
248 tcg_gen_add_i32(dest, t0, t1);
249 tmp = load_cpu_field(CF);
250 tcg_gen_add_i32(dest, dest, tmp);
251 dead_tmp(tmp);
252 }
253
254 /* dest = T0 - T1 + CF - 1. */
255 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
256 {
257 TCGv tmp;
258 tcg_gen_sub_i32(dest, t0, t1);
259 tmp = load_cpu_field(CF);
260 tcg_gen_add_i32(dest, dest, tmp);
261 tcg_gen_subi_i32(dest, dest, 1);
262 dead_tmp(tmp);
263 }
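/* Worked out with the 0/1 CF convention: gen_add_carry() yields
 * t0 + t1 + CF, and gen_sub_carry() yields t0 - t1 when CF is set and
 * t0 - t1 - 1 when CF is clear, i.e. CF behaves as an inverted borrow
 * (the usual ARM-style convention). */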
264
265 static void shifter_out_im(TCGv var, int shift)
266 {
267 TCGv tmp = new_tmp();
268 if (shift == 0) {
269 tcg_gen_andi_i32(tmp, var, 1);
270 } else {
271 tcg_gen_shri_i32(tmp, var, shift);
272 if (shift != 31) {
273 tcg_gen_andi_i32(tmp, tmp, 1);
274 }
275 }
276 gen_set_CF(tmp);
277 dead_tmp(tmp);
278 }
279
280 /* Shift by immediate. Includes special handling for shift == 0. */
281 static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
282 int flags)
283 {
284 switch (shiftop) {
285 case 0: /* LSL */
286 if (shift != 0) {
287 if (flags) {
288 shifter_out_im(var, 32 - shift);
289 }
290 tcg_gen_shli_i32(var, var, shift);
291 }
292 break;
293 case 1: /* LSR */
294 if (shift == 0) {
295 if (flags) {
296 tcg_gen_shri_i32(var, var, 31);
297 gen_set_CF(var);
298 }
299 tcg_gen_movi_i32(var, 0);
300 } else {
301 if (flags) {
302 shifter_out_im(var, shift - 1);
303 }
304 tcg_gen_shri_i32(var, var, shift);
305 }
306 break;
307 case 2: /* ASR */
308 if (shift == 0) {
309 shift = 32;
310 }
311 if (flags) {
312 shifter_out_im(var, shift - 1);
313 }
314 if (shift == 32) {
315 shift = 31;
316 }
317 tcg_gen_sari_i32(var, var, shift);
318 break;
319 case 3: /* ROR/RRX */
320 if (shift != 0) {
321 if (flags) {
322 shifter_out_im(var, shift - 1);
323 }
324 tcg_gen_rotri_i32(var, var, shift); break;
325 } else {
326 TCGv tmp = load_cpu_field(CF);
327 if (flags) {
328 shifter_out_im(var, 0);
329 }
330 tcg_gen_shri_i32(var, var, 1);
331 tcg_gen_shli_i32(tmp, tmp, 31);
332 tcg_gen_or_i32(var, var, tmp);
333 dead_tmp(tmp);
334 }
335 }
336 }
337
338 static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
339 TCGv shift, int flags)
340 {
341 if (flags) {
342 switch (shiftop) {
343 case 0:
344 gen_helper_shl_cc(var, var, shift);
345 break;
346 case 1:
347 gen_helper_shr_cc(var, var, shift);
348 break;
349 case 2:
350 gen_helper_sar_cc(var, var, shift);
351 break;
352 case 3:
353 gen_helper_ror_cc(var, var, shift);
354 break;
355 }
356 } else {
357 switch (shiftop) {
358 case 0:
359 gen_helper_shl(var, var, shift);
360 break;
361 case 1:
362 gen_helper_shr(var, var, shift);
363 break;
364 case 2:
365 gen_helper_sar(var, var, shift);
366 break;
367 case 3:
368 tcg_gen_andi_i32(shift, shift, 0x1f);
369 tcg_gen_rotr_i32(var, var, shift);
370 break;
371 }
372 }
373 dead_tmp(shift);
374 }
375
376 static void gen_test_cc(int cc, int label)
377 {
378 TCGv tmp;
379 TCGv tmp2;
380 int inv;
381
382 switch (cc) {
383 case 0: /* eq: Z */
384 tmp = load_cpu_field(ZF);
385 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
386 break;
387 case 1: /* ne: !Z */
388 tmp = load_cpu_field(ZF);
389 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
390 break;
391 case 2: /* cs: C */
392 tmp = load_cpu_field(CF);
393 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
394 break;
395 case 3: /* cc: !C */
396 tmp = load_cpu_field(CF);
397 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
398 break;
399 case 4: /* mi: N */
400 tmp = load_cpu_field(NF);
401 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
402 break;
403 case 5: /* pl: !N */
404 tmp = load_cpu_field(NF);
405 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
406 break;
407 case 6: /* vs: V */
408 tmp = load_cpu_field(VF);
409 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
410 break;
411 case 7: /* vc: !V */
412 tmp = load_cpu_field(VF);
413 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
414 break;
415 case 8: /* hi: C && !Z */
416 inv = gen_new_label();
417 tmp = load_cpu_field(CF);
418 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
419 dead_tmp(tmp);
420 tmp = load_cpu_field(ZF);
421 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
422 gen_set_label(inv);
423 break;
424 case 9: /* ls: !C || Z */
425 tmp = load_cpu_field(CF);
426 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
427 dead_tmp(tmp);
428 tmp = load_cpu_field(ZF);
429 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
430 break;
431 case 10: /* ge: N == V -> N ^ V == 0 */
432 tmp = load_cpu_field(VF);
433 tmp2 = load_cpu_field(NF);
434 tcg_gen_xor_i32(tmp, tmp, tmp2);
435 dead_tmp(tmp2);
436 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
437 break;
438 case 11: /* lt: N != V -> N ^ V != 0 */
439 tmp = load_cpu_field(VF);
440 tmp2 = load_cpu_field(NF);
441 tcg_gen_xor_i32(tmp, tmp, tmp2);
442 dead_tmp(tmp2);
443 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
444 break;
445 case 12: /* gt: !Z && N == V */
446 inv = gen_new_label();
447 tmp = load_cpu_field(ZF);
448 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
449 dead_tmp(tmp);
450 tmp = load_cpu_field(VF);
451 tmp2 = load_cpu_field(NF);
452 tcg_gen_xor_i32(tmp, tmp, tmp2);
453 dead_tmp(tmp2);
454 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
455 gen_set_label(inv);
456 break;
457 case 13: /* le: Z || N != V */
458 tmp = load_cpu_field(ZF);
459 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
460 dead_tmp(tmp);
461 tmp = load_cpu_field(VF);
462 tmp2 = load_cpu_field(NF);
463 tcg_gen_xor_i32(tmp, tmp, tmp2);
464 dead_tmp(tmp2);
465 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
466 break;
467 default:
468 fprintf(stderr, "Bad condition code 0x%x\n", cc);
469 abort();
470 }
471 dead_tmp(tmp);
472 }
473
474 static const uint8_t table_logic_cc[16] = {
475 1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */
476 0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */
477 1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */
478 1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */
479 };
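/* Indexed by UCOP_OPCODES (bits [28:25]): a 1 marks the logical
 * operations whose N and Z flags are set via gen_logic_CC(), while the
 * arithmetic opcodes obtain full NZCV flags from the gen_helper_*_cc
 * helpers in do_datap() below. */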
480
481 /* Set PC state from an immediate address. */
482 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
483 {
484 s->is_jmp = DISAS_UPDATE;
485 tcg_gen_movi_i32(cpu_R[31], addr & ~3);
486 }
487
488 /* Set PC state from var. var is marked as dead. */
489 static inline void gen_bx(DisasContext *s, TCGv var)
490 {
491 s->is_jmp = DISAS_UPDATE;
492 tcg_gen_andi_i32(cpu_R[31], var, ~3);
493 dead_tmp(var);
494 }
495
496 static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
497 {
498 store_reg(s, reg, var);
499 }
500
501 static inline TCGv gen_ld8s(TCGv addr, int index)
502 {
503 TCGv tmp = new_tmp();
504 tcg_gen_qemu_ld8s(tmp, addr, index);
505 return tmp;
506 }
507
508 static inline TCGv gen_ld8u(TCGv addr, int index)
509 {
510 TCGv tmp = new_tmp();
511 tcg_gen_qemu_ld8u(tmp, addr, index);
512 return tmp;
513 }
514
515 static inline TCGv gen_ld16s(TCGv addr, int index)
516 {
517 TCGv tmp = new_tmp();
518 tcg_gen_qemu_ld16s(tmp, addr, index);
519 return tmp;
520 }
521
522 static inline TCGv gen_ld16u(TCGv addr, int index)
523 {
524 TCGv tmp = new_tmp();
525 tcg_gen_qemu_ld16u(tmp, addr, index);
526 return tmp;
527 }
528
529 static inline TCGv gen_ld32(TCGv addr, int index)
530 {
531 TCGv tmp = new_tmp();
532 tcg_gen_qemu_ld32u(tmp, addr, index);
533 return tmp;
534 }
535
536 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
537 {
538 TCGv_i64 tmp = tcg_temp_new_i64();
539 tcg_gen_qemu_ld64(tmp, addr, index);
540 return tmp;
541 }
542
543 static inline void gen_st8(TCGv val, TCGv addr, int index)
544 {
545 tcg_gen_qemu_st8(val, addr, index);
546 dead_tmp(val);
547 }
548
549 static inline void gen_st16(TCGv val, TCGv addr, int index)
550 {
551 tcg_gen_qemu_st16(val, addr, index);
552 dead_tmp(val);
553 }
554
555 static inline void gen_st32(TCGv val, TCGv addr, int index)
556 {
557 tcg_gen_qemu_st32(val, addr, index);
558 dead_tmp(val);
559 }
560
561 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
562 {
563 tcg_gen_qemu_st64(val, addr, index);
564 tcg_temp_free_i64(val);
565 }
566
567 static inline void gen_set_pc_im(uint32_t val)
568 {
569 tcg_gen_movi_i32(cpu_R[31], val);
570 }
571
572 /* Force a TB lookup after an instruction that changes the CPU state. */
573 static inline void gen_lookup_tb(DisasContext *s)
574 {
575 tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
576 s->is_jmp = DISAS_UPDATE;
577 }
578
579 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
580 TCGv var)
581 {
582 int val;
583 TCGv offset;
584
585 if (UCOP_SET(29)) {
586 /* immediate */
587 val = UCOP_IMM14;
588 if (!UCOP_SET_U) {
589 val = -val;
590 }
591 if (val != 0) {
592 tcg_gen_addi_i32(var, var, val);
593 }
594 } else {
595 /* shift/register */
596 offset = load_reg(s, UCOP_REG_M);
597 gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
598 if (!UCOP_SET_U) {
599 tcg_gen_sub_i32(var, var, offset);
600 } else {
601 tcg_gen_add_i32(var, var, offset);
602 }
603 dead_tmp(offset);
604 }
605 }
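/* Addressing-mode summary for the code above: bit 29 selects a 14-bit
 * immediate offset, otherwise the offset is register UCOP_REG_M shifted
 * by the immediate shift field; in both forms the U bit (bit 27) decides
 * whether the offset is added to or subtracted from the base in 'var'. */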
606
607 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
608 TCGv var)
609 {
610 int val;
611 TCGv offset;
612
613 if (UCOP_SET(26)) {
614 /* immediate */
615 val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
616 if (!UCOP_SET_U) {
617 val = -val;
618 }
619 if (val != 0) {
620 tcg_gen_addi_i32(var, var, val);
621 }
622 } else {
623 /* register */
624 offset = load_reg(s, UCOP_REG_M);
625 if (!UCOP_SET_U) {
626 tcg_gen_sub_i32(var, var, offset);
627 } else {
628 tcg_gen_add_i32(var, var, offset);
629 }
630 dead_tmp(offset);
631 }
632 }
633
634 static inline long ucf64_reg_offset(int reg)
635 {
636 if (reg & 1) {
637 return offsetof(CPUState, ucf64.regs[reg >> 1])
638 + offsetof(CPU_DoubleU, l.upper);
639 } else {
640 return offsetof(CPUState, ucf64.regs[reg >> 1])
641 + offsetof(CPU_DoubleU, l.lower);
642 }
643 }
644
645 #define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
646 #define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
647
648 /* UniCore-F64 single load/store I_offset */
649 static void do_ucf64_ldst_i(CPUState *env, DisasContext *s, uint32_t insn)
650 {
651 int offset;
652 TCGv tmp;
653 TCGv addr;
654
655 addr = load_reg(s, UCOP_REG_N);
656 if (!UCOP_SET_P && !UCOP_SET_W) {
657 ILLEGAL;
658 }
659
660 if (UCOP_SET_P) {
661 offset = UCOP_IMM10 << 2;
662 if (!UCOP_SET_U) {
663 offset = -offset;
664 }
665 if (offset != 0) {
666 tcg_gen_addi_i32(addr, addr, offset);
667 }
668 }
669
670 if (UCOP_SET_L) { /* load */
671 tmp = gen_ld32(addr, IS_USER(s));
672 ucf64_gen_st32(tmp, UCOP_REG_D);
673 } else { /* store */
674 tmp = ucf64_gen_ld32(UCOP_REG_D);
675 gen_st32(tmp, addr, IS_USER(s));
676 }
677
678 if (!UCOP_SET_P) {
679 offset = UCOP_IMM10 << 2;
680 if (!UCOP_SET_U) {
681 offset = -offset;
682 }
683 if (offset != 0) {
684 tcg_gen_addi_i32(addr, addr, offset);
685 }
686 }
687 if (UCOP_SET_W) {
688 store_reg(s, UCOP_REG_N, addr);
689 } else {
690 dead_tmp(addr);
691 }
692 }
693
694 /* UniCore-F64 load/store multiple words */
695 static void do_ucf64_ldst_m(CPUState *env, DisasContext *s, uint32_t insn)
696 {
697 unsigned int i;
698 int j, n, freg;
699 TCGv tmp;
700 TCGv addr;
701
702 if (UCOP_REG_D != 0) {
703 ILLEGAL;
704 }
705 if (UCOP_REG_N == 31) {
706 ILLEGAL;
707 }
708 if ((insn << 24) == 0) {
709 ILLEGAL;
710 }
711
712 addr = load_reg(s, UCOP_REG_N);
713
714 n = 0;
715 for (i = 0; i < 8; i++) {
716 if (UCOP_SET(i)) {
717 n++;
718 }
719 }
720
721 if (UCOP_SET_U) {
722 if (UCOP_SET_P) { /* pre increment */
723 tcg_gen_addi_i32(addr, addr, 4);
724 } /* unnecessary to do anything when post increment */
725 } else {
726 if (UCOP_SET_P) { /* pre decrement */
727 tcg_gen_addi_i32(addr, addr, -(n * 4));
728 } else { /* post decrement */
729 if (n != 1) {
730 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
731 }
732 }
733 }
734
735 freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
736
737 for (i = 0, j = 0; i < 8; i++, freg++) {
738 if (!UCOP_SET(i)) {
739 continue;
740 }
741
742 if (UCOP_SET_L) { /* load */
743 tmp = gen_ld32(addr, IS_USER(s));
744 ucf64_gen_st32(tmp, freg);
745 } else { /* store */
746 tmp = ucf64_gen_ld32(freg);
747 gen_st32(tmp, addr, IS_USER(s));
748 }
749
750 j++;
751 /* unnecessary to add after the last transfer */
752 if (j != n) {
753 tcg_gen_addi_i32(addr, addr, 4);
754 }
755 }
756
757 if (UCOP_SET_W) { /* write back */
758 if (UCOP_SET_U) {
759 if (!UCOP_SET_P) { /* post increment */
760 tcg_gen_addi_i32(addr, addr, 4);
761 } /* unnecessary to do anything when pre increment */
762 } else {
763 if (UCOP_SET_P) {
764 /* pre decrement */
765 if (n != 1) {
766 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
767 }
768 } else {
769 /* post decrement */
770 tcg_gen_addi_i32(addr, addr, -(n * 4));
771 }
772 }
773 store_reg(s, UCOP_REG_N, addr);
774 } else {
775 dead_tmp(addr);
776 }
777 }
778
779 /* UniCore-F64 mrc/mcr */
780 static void do_ucf64_trans(CPUState *env, DisasContext *s, uint32_t insn)
781 {
782 TCGv tmp;
783
784 if ((insn & 0xfe0003ff) == 0xe2000000) {
785 /* control register */
786 if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
787 ILLEGAL;
788 }
789 if (UCOP_SET(24)) {
790 /* CFF */
791 tmp = new_tmp();
792 gen_helper_ucf64_get_fpscr(tmp, cpu_env);
793 store_reg(s, UCOP_REG_D, tmp);
794 } else {
795 /* CTF */
796 tmp = load_reg(s, UCOP_REG_D);
797 gen_helper_ucf64_set_fpscr(cpu_env, tmp);
798 dead_tmp(tmp);
799 gen_lookup_tb(s);
800 }
801 return;
802 }
803 if ((insn & 0xfe0003ff) == 0xe0000000) {
804 /* general register */
805 if (UCOP_REG_D == 31) {
806 ILLEGAL;
807 }
808 if (UCOP_SET(24)) { /* MFF */
809 tmp = ucf64_gen_ld32(UCOP_REG_N);
810 store_reg(s, UCOP_REG_D, tmp);
811 } else { /* MTF */
812 tmp = load_reg(s, UCOP_REG_D);
813 ucf64_gen_st32(tmp, UCOP_REG_N);
814 }
815 return;
816 }
817 if ((insn & 0xfb000000) == 0xe9000000) {
818 /* MFFC */
819 if (UCOP_REG_D != 31) {
820 ILLEGAL;
821 }
822 if (UCOP_UCF64_COND & 0x8) {
823 ILLEGAL;
824 }
825
826 tmp = new_tmp();
827 tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
828 if (UCOP_SET(26)) {
829 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
830 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
831 gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
832 } else {
833 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
834 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
835 gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
836 }
837 dead_tmp(tmp);
838 return;
839 }
840 ILLEGAL;
841 }
842
843 /* UniCore-F64 convert instructions */
844 static void do_ucf64_fcvt(CPUState *env, DisasContext *s, uint32_t insn)
845 {
846 if (UCOP_UCF64_FMT == 3) {
847 ILLEGAL;
848 }
849 if (UCOP_REG_N != 0) {
850 ILLEGAL;
851 }
852 switch (UCOP_UCF64_FUNC) {
853 case 0: /* cvt.s */
854 switch (UCOP_UCF64_FMT) {
855 case 1 /* d */:
856 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
857 gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
858 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
859 break;
860 case 2 /* w */:
861 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
862 gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
863 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
864 break;
865 default /* s */:
866 ILLEGAL;
867 break;
868 }
869 break;
870 case 1: /* cvt.d */
871 switch (UCOP_UCF64_FMT) {
872 case 0 /* s */:
873 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
874 gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
875 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
876 break;
877 case 2 /* w */:
878 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
879 gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
880 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
881 break;
882 default /* d */:
883 ILLEGAL;
884 break;
885 }
886 break;
887 case 4: /* cvt.w */
888 switch (UCOP_UCF64_FMT) {
889 case 0 /* s */:
890 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
891 gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
892 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
893 break;
894 case 1 /* d */:
895 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
896 gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
897 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
898 break;
899 default /* w */:
900 ILLEGAL;
901 break;
902 }
903 break;
904 default:
905 ILLEGAL;
906 }
907 }
908
909 /* UniCore-F64 compare instructions */
910 static void do_ucf64_fcmp(CPUState *env, DisasContext *s, uint32_t insn)
911 {
912 if (UCOP_SET(25)) {
913 ILLEGAL;
914 }
915 if (UCOP_REG_D != 0) {
916 ILLEGAL;
917 }
918
919 ILLEGAL; /* TODO */
920 if (UCOP_SET(24)) {
921 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
922 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
923 /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
924 } else {
925 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
926 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
927 /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
928 }
929 }
930
931 #define gen_helper_ucf64_movs(x, y) do { } while (0)
932 #define gen_helper_ucf64_movd(x, y) do { } while (0)
933
934 #define UCF64_OP1(name) do { \
935 if (UCOP_REG_N != 0) { \
936 ILLEGAL; \
937 } \
938 switch (UCOP_UCF64_FMT) { \
939 case 0 /* s */: \
940 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
941 ucf64_reg_offset(UCOP_REG_M)); \
942 gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
943 tcg_gen_st_i32(cpu_F0s, cpu_env, \
944 ucf64_reg_offset(UCOP_REG_D)); \
945 break; \
946 case 1 /* d */: \
947 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
948 ucf64_reg_offset(UCOP_REG_M)); \
949 gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
950 tcg_gen_st_i64(cpu_F0d, cpu_env, \
951 ucf64_reg_offset(UCOP_REG_D)); \
952 break; \
953 case 2 /* w */: \
954 ILLEGAL; \
955 break; \
956 } \
957 } while (0)
958
959 #define UCF64_OP2(name) do { \
960 switch (UCOP_UCF64_FMT) { \
961 case 0 /* s */: \
962 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
963 ucf64_reg_offset(UCOP_REG_N)); \
964 tcg_gen_ld_i32(cpu_F1s, cpu_env, \
965 ucf64_reg_offset(UCOP_REG_M)); \
966 gen_helper_ucf64_##name##s(cpu_F0s, \
967 cpu_F0s, cpu_F1s, cpu_env); \
968 tcg_gen_st_i32(cpu_F0s, cpu_env, \
969 ucf64_reg_offset(UCOP_REG_D)); \
970 break; \
971 case 1 /* d */: \
972 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
973 ucf64_reg_offset(UCOP_REG_N)); \
974 tcg_gen_ld_i64(cpu_F1d, cpu_env, \
975 ucf64_reg_offset(UCOP_REG_M)); \
976 gen_helper_ucf64_##name##d(cpu_F0d, \
977 cpu_F0d, cpu_F1d, cpu_env); \
978 tcg_gen_st_i64(cpu_F0d, cpu_env, \
979 ucf64_reg_offset(UCOP_REG_D)); \
980 break; \
981 case 2 /* w */: \
982 ILLEGAL; \
983 break; \
984 } \
985 } while (0)
986
987 /* UniCore-F64 data processing */
988 static void do_ucf64_datap(CPUState *env, DisasContext *s, uint32_t insn)
989 {
990 if (UCOP_UCF64_FMT == 3) {
991 ILLEGAL;
992 }
993 switch (UCOP_UCF64_FUNC) {
994 case 0: /* add */
995 UCF64_OP2(add);
996 break;
997 case 1: /* sub */
998 UCF64_OP2(sub);
999 break;
1000 case 2: /* mul */
1001 UCF64_OP2(mul);
1002 break;
1003 case 4: /* div */
1004 UCF64_OP2(div);
1005 break;
1006 case 5: /* abs */
1007 UCF64_OP1(abs);
1008 break;
1009 case 6: /* mov */
1010 UCF64_OP1(mov);
1011 break;
1012 case 7: /* neg */
1013 UCF64_OP1(neg);
1014 break;
1015 default:
1016 ILLEGAL;
1017 }
1018 }
1019
1020 /* Disassemble an F64 instruction */
1021 static void disas_ucf64_insn(CPUState *env, DisasContext *s, uint32_t insn)
1022 {
1023 if (!UCOP_SET(29)) {
1024 if (UCOP_SET(26)) {
1025 do_ucf64_ldst_m(env, s, insn);
1026 } else {
1027 do_ucf64_ldst_i(env, s, insn);
1028 }
1029 } else {
1030 if (UCOP_SET(5)) {
1031 switch ((insn >> 26) & 0x3) {
1032 case 0:
1033 do_ucf64_datap(env, s, insn);
1034 break;
1035 case 1:
1036 ILLEGAL;
1037 break;
1038 case 2:
1039 do_ucf64_fcvt(env, s, insn);
1040 break;
1041 case 3:
1042 do_ucf64_fcmp(env, s, insn);
1043 break;
1044 }
1045 } else {
1046 do_ucf64_trans(env, s, insn);
1047 }
1048 }
1049 }
1050
1051 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1052 {
1053 TranslationBlock *tb;
1054
1055 tb = s->tb;
1056 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
1057 tcg_gen_goto_tb(n);
1058 gen_set_pc_im(dest);
1059 tcg_gen_exit_tb((tcg_target_long)tb + n);
1060 } else {
1061 gen_set_pc_im(dest);
1062 tcg_gen_exit_tb(0);
1063 }
1064 }
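/* Note that gen_goto_tb() only emits a directly chained goto_tb/exit_tb
 * pair when the destination lies on the same guest page as the start of
 * the current TB; for a cross-page target it just sets the PC and exits
 * with 0 so the main loop performs a fresh TB lookup. */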
1065
1066 static inline void gen_jmp(DisasContext *s, uint32_t dest)
1067 {
1068 if (unlikely(s->singlestep_enabled)) {
1069 /* An indirect jump so that we still trigger the debug exception. */
1070 gen_bx_im(s, dest);
1071 } else {
1072 gen_goto_tb(s, 0, dest);
1073 s->is_jmp = DISAS_TB_JUMP;
1074 }
1075 }
1076
1077 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
1078 {
1079 if (x) {
1080 tcg_gen_sari_i32(t0, t0, 16);
1081 } else {
1082 gen_sxth(t0);
1083 }
1084 if (y) {
1085 tcg_gen_sari_i32(t1, t1, 16);
1086 } else {
1087 gen_sxth(t1);
1088 }
1089 tcg_gen_mul_i32(t0, t0, t1);
1090 }
1091
1092 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
1093 static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
1094 {
1095 TCGv tmp;
1096 if (bsr) {
1097 /* ??? This is also undefined in system mode. */
1098 if (IS_USER(s)) {
1099 return 1;
1100 }
1101
1102 tmp = load_cpu_field(bsr);
1103 tcg_gen_andi_i32(tmp, tmp, ~mask);
1104 tcg_gen_andi_i32(t0, t0, mask);
1105 tcg_gen_or_i32(tmp, tmp, t0);
1106 store_cpu_field(tmp, bsr);
1107 } else {
1108 gen_set_asr(t0, mask);
1109 }
1110 dead_tmp(t0);
1111 gen_lookup_tb(s);
1112 return 0;
1113 }
1114
1115 /* Generate an old-style exception return. Marks pc as dead. */
1116 static void gen_exception_return(DisasContext *s, TCGv pc)
1117 {
1118 TCGv tmp;
1119 store_reg(s, 31, pc);
1120 tmp = load_cpu_field(bsr);
1121 gen_set_asr(tmp, 0xffffffff);
1122 dead_tmp(tmp);
1123 s->is_jmp = DISAS_UPDATE;
1124 }
1125
1126 static void disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
1127 {
1128 switch (UCOP_CPNUM) {
1129 case 2:
1130 disas_ucf64_insn(env, s, insn);
1131 break;
1132 default:
1133 /* Unknown coprocessor. */
1134 cpu_abort(env, "Unknown coprocessor!");
1135 }
1136 }
1137
1138
1139 /* Store a 64-bit value to a register pair. Clobbers val. */
1140 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
1141 {
1142 TCGv tmp;
1143 tmp = new_tmp();
1144 tcg_gen_trunc_i64_i32(tmp, val);
1145 store_reg(s, rlow, tmp);
1146 tmp = new_tmp();
1147 tcg_gen_shri_i64(val, val, 32);
1148 tcg_gen_trunc_i64_i32(tmp, val);
1149 store_reg(s, rhigh, tmp);
1150 }
1151
1152 /* load and add a 64-bit value from a register pair. */
1153 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
1154 {
1155 TCGv_i64 tmp;
1156 TCGv tmpl;
1157 TCGv tmph;
1158
1159 /* Load 64-bit value rd:rn. */
1160 tmpl = load_reg(s, rlow);
1161 tmph = load_reg(s, rhigh);
1162 tmp = tcg_temp_new_i64();
1163 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
1164 dead_tmp(tmpl);
1165 dead_tmp(tmph);
1166 tcg_gen_add_i64(val, val, tmp);
1167 tcg_temp_free_i64(tmp);
1168 }
1169
1170 /* data processing instructions */
1171 static void do_datap(CPUState *env, DisasContext *s, uint32_t insn)
1172 {
1173 TCGv tmp;
1174 TCGv tmp2;
1175 int logic_cc;
1176
1177 if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
1178 if (UCOP_SET(23)) { /* CMOV instructions */
1179 if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
1180 ILLEGAL;
1181 }
1182 /* if not always executed, generate a conditional jump to the
1183    next instruction */
1184 s->condlabel = gen_new_label();
1185 gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
1186 s->condjmp = 1;
1187 }
1188 }
1189
1190 logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);
1191
1192 if (UCOP_SET(29)) {
1193 unsigned int val;
1194 /* immediate operand */
1195 val = UCOP_IMM_9;
1196 if (UCOP_SH_IM) {
1197 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1198 }
1199 tmp2 = new_tmp();
1200 tcg_gen_movi_i32(tmp2, val);
1201 if (logic_cc && UCOP_SH_IM) {
1202 gen_set_CF_bit31(tmp2);
1203 }
1204 } else {
1205 /* register */
1206 tmp2 = load_reg(s, UCOP_REG_M);
1207 if (UCOP_SET(5)) {
1208 tmp = load_reg(s, UCOP_REG_S);
1209 gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
1210 } else {
1211 gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
1212 }
1213 }
1214
1215 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1216 tmp = load_reg(s, UCOP_REG_N);
1217 } else {
1218 TCGV_UNUSED(tmp);
1219 }
1220
1221 switch (UCOP_OPCODES) {
1222 case 0x00:
1223 tcg_gen_and_i32(tmp, tmp, tmp2);
1224 if (logic_cc) {
1225 gen_logic_CC(tmp);
1226 }
1227 store_reg_bx(s, UCOP_REG_D, tmp);
1228 break;
1229 case 0x01:
1230 tcg_gen_xor_i32(tmp, tmp, tmp2);
1231 if (logic_cc) {
1232 gen_logic_CC(tmp);
1233 }
1234 store_reg_bx(s, UCOP_REG_D, tmp);
1235 break;
1236 case 0x02:
1237 if (UCOP_SET_S && UCOP_REG_D == 31) {
1238 /* SUBS r31, ... is used for exception return. */
1239 if (IS_USER(s)) {
1240 ILLEGAL;
1241 }
1242 gen_helper_sub_cc(tmp, tmp, tmp2);
1243 gen_exception_return(s, tmp);
1244 } else {
1245 if (UCOP_SET_S) {
1246 gen_helper_sub_cc(tmp, tmp, tmp2);
1247 } else {
1248 tcg_gen_sub_i32(tmp, tmp, tmp2);
1249 }
1250 store_reg_bx(s, UCOP_REG_D, tmp);
1251 }
1252 break;
1253 case 0x03:
1254 if (UCOP_SET_S) {
1255 gen_helper_sub_cc(tmp, tmp2, tmp);
1256 } else {
1257 tcg_gen_sub_i32(tmp, tmp2, tmp);
1258 }
1259 store_reg_bx(s, UCOP_REG_D, tmp);
1260 break;
1261 case 0x04:
1262 if (UCOP_SET_S) {
1263 gen_helper_add_cc(tmp, tmp, tmp2);
1264 } else {
1265 tcg_gen_add_i32(tmp, tmp, tmp2);
1266 }
1267 store_reg_bx(s, UCOP_REG_D, tmp);
1268 break;
1269 case 0x05:
1270 if (UCOP_SET_S) {
1271 gen_helper_adc_cc(tmp, tmp, tmp2);
1272 } else {
1273 gen_add_carry(tmp, tmp, tmp2);
1274 }
1275 store_reg_bx(s, UCOP_REG_D, tmp);
1276 break;
1277 case 0x06:
1278 if (UCOP_SET_S) {
1279 gen_helper_sbc_cc(tmp, tmp, tmp2);
1280 } else {
1281 gen_sub_carry(tmp, tmp, tmp2);
1282 }
1283 store_reg_bx(s, UCOP_REG_D, tmp);
1284 break;
1285 case 0x07:
1286 if (UCOP_SET_S) {
1287 gen_helper_sbc_cc(tmp, tmp2, tmp);
1288 } else {
1289 gen_sub_carry(tmp, tmp2, tmp);
1290 }
1291 store_reg_bx(s, UCOP_REG_D, tmp);
1292 break;
1293 case 0x08:
1294 if (UCOP_SET_S) {
1295 tcg_gen_and_i32(tmp, tmp, tmp2);
1296 gen_logic_CC(tmp);
1297 }
1298 dead_tmp(tmp);
1299 break;
1300 case 0x09:
1301 if (UCOP_SET_S) {
1302 tcg_gen_xor_i32(tmp, tmp, tmp2);
1303 gen_logic_CC(tmp);
1304 }
1305 dead_tmp(tmp);
1306 break;
1307 case 0x0a:
1308 if (UCOP_SET_S) {
1309 gen_helper_sub_cc(tmp, tmp, tmp2);
1310 }
1311 dead_tmp(tmp);
1312 break;
1313 case 0x0b:
1314 if (UCOP_SET_S) {
1315 gen_helper_add_cc(tmp, tmp, tmp2);
1316 }
1317 dead_tmp(tmp);
1318 break;
1319 case 0x0c:
1320 tcg_gen_or_i32(tmp, tmp, tmp2);
1321 if (logic_cc) {
1322 gen_logic_CC(tmp);
1323 }
1324 store_reg_bx(s, UCOP_REG_D, tmp);
1325 break;
1326 case 0x0d:
1327 if (logic_cc && UCOP_REG_D == 31) {
1328 /* MOVS r31, ... is used for exception return. */
1329 if (IS_USER(s)) {
1330 ILLEGAL;
1331 }
1332 gen_exception_return(s, tmp2);
1333 } else {
1334 if (logic_cc) {
1335 gen_logic_CC(tmp2);
1336 }
1337 store_reg_bx(s, UCOP_REG_D, tmp2);
1338 }
1339 break;
1340 case 0x0e:
1341 tcg_gen_andc_i32(tmp, tmp, tmp2);
1342 if (logic_cc) {
1343 gen_logic_CC(tmp);
1344 }
1345 store_reg_bx(s, UCOP_REG_D, tmp);
1346 break;
1347 default:
1348 case 0x0f:
1349 tcg_gen_not_i32(tmp2, tmp2);
1350 if (logic_cc) {
1351 gen_logic_CC(tmp2);
1352 }
1353 store_reg_bx(s, UCOP_REG_D, tmp2);
1354 break;
1355 }
1356 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1357 dead_tmp(tmp2);
1358 }
1359 }
1360
1361 /* multiply */
1362 static void do_mult(CPUState *env, DisasContext *s, uint32_t insn)
1363 {
1364 TCGv tmp;
1365 TCGv tmp2;
1366 TCGv_i64 tmp64;
1367
1368 if (UCOP_SET(27)) {
1369 /* 64 bit mul */
1370 tmp = load_reg(s, UCOP_REG_M);
1371 tmp2 = load_reg(s, UCOP_REG_N);
1372 if (UCOP_SET(26)) {
1373 tmp64 = gen_muls_i64_i32(tmp, tmp2);
1374 } else {
1375 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
1376 }
1377 if (UCOP_SET(25)) { /* mult accumulate */
1378 gen_addq(s, tmp64, UCOP_REG_LO, UCOP_REG_HI);
1379 }
1380 gen_storeq_reg(s, UCOP_REG_LO, UCOP_REG_HI, tmp64);
1381 tcg_temp_free_i64(tmp64);
1382 } else {
1383 /* 32 bit mul */
1384 tmp = load_reg(s, UCOP_REG_M);
1385 tmp2 = load_reg(s, UCOP_REG_N);
1386 tcg_gen_mul_i32(tmp, tmp, tmp2);
1387 dead_tmp(tmp2);
1388 if (UCOP_SET(25)) {
1389 /* Add */
1390 tmp2 = load_reg(s, UCOP_REG_S);
1391 tcg_gen_add_i32(tmp, tmp, tmp2);
1392 dead_tmp(tmp2);
1393 }
1394 if (UCOP_SET_S) {
1395 gen_logic_CC(tmp);
1396 }
1397 store_reg(s, UCOP_REG_D, tmp);
1398 }
1399 }
1400
1401 /* miscellaneous instructions */
1402 static void do_misc(CPUState *env, DisasContext *s, uint32_t insn)
1403 {
1404 unsigned int val;
1405 TCGv tmp;
1406
1407 if ((insn & 0xffffffe0) == 0x10ffc120) {
1408 /* Trivial implementation equivalent to bx. */
1409 tmp = load_reg(s, UCOP_REG_M);
1410 gen_bx(s, tmp);
1411 return;
1412 }
1413
1414 if ((insn & 0xfbffc000) == 0x30ffc000) {
1415 /* PSR = immediate */
1416 val = UCOP_IMM_9;
1417 if (UCOP_SH_IM) {
1418 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1419 }
1420 tmp = new_tmp();
1421 tcg_gen_movi_i32(tmp, val);
1422 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1423 ILLEGAL;
1424 }
1425 return;
1426 }
1427
1428 if ((insn & 0xfbffffe0) == 0x12ffc020) {
1429 /* PSR.flag = reg */
1430 tmp = load_reg(s, UCOP_REG_M);
1431 if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
1432 ILLEGAL;
1433 }
1434 return;
1435 }
1436
1437 if ((insn & 0xfbffffe0) == 0x10ffc020) {
1438 /* PSR = reg */
1439 tmp = load_reg(s, UCOP_REG_M);
1440 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1441 ILLEGAL;
1442 }
1443 return;
1444 }
1445
1446 if ((insn & 0xfbf83fff) == 0x10f80000) {
1447 /* reg = PSR */
1448 if (UCOP_SET_B) {
1449 if (IS_USER(s)) {
1450 ILLEGAL;
1451 }
1452 tmp = load_cpu_field(bsr);
1453 } else {
1454 tmp = new_tmp();
1455 gen_helper_asr_read(tmp);
1456 }
1457 store_reg(s, UCOP_REG_D, tmp);
1458 return;
1459 }
1460
1461 if ((insn & 0xfbf83fe0) == 0x12f80120) {
1462 /* clz */
1463 tmp = load_reg(s, UCOP_REG_M);
1464 if (UCOP_SET(26)) {
1465 gen_helper_clo(tmp, tmp);
1466 } else {
1467 gen_helper_clz(tmp, tmp);
1468 }
1469 store_reg(s, UCOP_REG_D, tmp);
1470 return;
1471 }
1472
1473 /* otherwise */
1474 ILLEGAL;
1475 }
1476
1477 /* load/store I_offset and R_offset */
1478 static void do_ldst_ir(CPUState *env, DisasContext *s, uint32_t insn)
1479 {
1480 unsigned int i;
1481 TCGv tmp;
1482 TCGv tmp2;
1483
1484 tmp2 = load_reg(s, UCOP_REG_N);
1485 i = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
1486
1487 /* immediate */
1488 if (UCOP_SET_P) {
1489 gen_add_data_offset(s, insn, tmp2);
1490 }
1491
1492 if (UCOP_SET_L) {
1493 /* load */
1494 if (UCOP_SET_B) {
1495 tmp = gen_ld8u(tmp2, i);
1496 } else {
1497 tmp = gen_ld32(tmp2, i);
1498 }
1499 } else {
1500 /* store */
1501 tmp = load_reg(s, UCOP_REG_D);
1502 if (UCOP_SET_B) {
1503 gen_st8(tmp, tmp2, i);
1504 } else {
1505 gen_st32(tmp, tmp2, i);
1506 }
1507 }
1508 if (!UCOP_SET_P) {
1509 gen_add_data_offset(s, insn, tmp2);
1510 store_reg(s, UCOP_REG_N, tmp2);
1511 } else if (UCOP_SET_W) {
1512 store_reg(s, UCOP_REG_N, tmp2);
1513 } else {
1514 dead_tmp(tmp2);
1515 }
1516 if (UCOP_SET_L) {
1517 /* Complete the load. */
1518 if (UCOP_REG_D == 31) {
1519 gen_bx(s, tmp);
1520 } else {
1521 store_reg(s, UCOP_REG_D, tmp);
1522 }
1523 }
1524 }
1525
1526 /* SWP instruction */
1527 static void do_swap(CPUState *env, DisasContext *s, uint32_t insn)
1528 {
1529 TCGv addr;
1530 TCGv tmp;
1531 TCGv tmp2;
1532
1533 if ((insn & 0xff003fe0) != 0x40000120) {
1534 ILLEGAL;
1535 }
1536
1537 /* ??? This is not really atomic. However we know
1538 we never have multiple CPUs running in parallel,
1539 so it is good enough. */
1540 addr = load_reg(s, UCOP_REG_N);
1541 tmp = load_reg(s, UCOP_REG_M);
1542 if (UCOP_SET_B) {
1543 tmp2 = gen_ld8u(addr, IS_USER(s));
1544 gen_st8(tmp, addr, IS_USER(s));
1545 } else {
1546 tmp2 = gen_ld32(addr, IS_USER(s));
1547 gen_st32(tmp, addr, IS_USER(s));
1548 }
1549 dead_tmp(addr);
1550 store_reg(s, UCOP_REG_D, tmp2);
1551 }
1552
1553 /* load/store hw/sb */
1554 static void do_ldst_hwsb(CPUState *env, DisasContext *s, uint32_t insn)
1555 {
1556 TCGv addr;
1557 TCGv tmp;
1558
1559 if (UCOP_SH_OP == 0) {
1560 do_swap(env, s, insn);
1561 return;
1562 }
1563
1564 addr = load_reg(s, UCOP_REG_N);
1565 if (UCOP_SET_P) {
1566 gen_add_datah_offset(s, insn, addr);
1567 }
1568
1569 if (UCOP_SET_L) { /* load */
1570 switch (UCOP_SH_OP) {
1571 case 1:
1572 tmp = gen_ld16u(addr, IS_USER(s));
1573 break;
1574 case 2:
1575 tmp = gen_ld8s(addr, IS_USER(s));
1576 break;
1577 default: /* see do_swap */
1578 case 3:
1579 tmp = gen_ld16s(addr, IS_USER(s));
1580 break;
1581 }
1582 } else { /* store */
1583 if (UCOP_SH_OP != 1) {
1584 ILLEGAL;
1585 }
1586 tmp = load_reg(s, UCOP_REG_D);
1587 gen_st16(tmp, addr, IS_USER(s));
1588 }
1589 /* Perform base writeback before the loaded value to
1590 ensure correct behavior with overlapping index registers. */
1591 if (!UCOP_SET_P) {
1592 gen_add_datah_offset(s, insn, addr);
1593 store_reg(s, UCOP_REG_N, addr);
1594 } else if (UCOP_SET_W) {
1595 store_reg(s, UCOP_REG_N, addr);
1596 } else {
1597 dead_tmp(addr);
1598 }
1599 if (UCOP_SET_L) {
1600 /* Complete the load. */
1601 store_reg(s, UCOP_REG_D, tmp);
1602 }
1603 }
1604
1605 /* load/store multiple words */
1606 static void do_ldst_m(CPUState *env, DisasContext *s, uint32_t insn)
1607 {
1608 unsigned int val, i;
1609 int j, n, reg, user, loaded_base;
1610 TCGv tmp;
1611 TCGv tmp2;
1612 TCGv addr;
1613 TCGv loaded_var;
1614
1615 if (UCOP_SET(7)) {
1616 ILLEGAL;
1617 }
1618 /* XXX: store correct base if write back */
1619 user = 0;
1620 if (UCOP_SET_B) { /* S bit in instruction table */
1621 if (IS_USER(s)) {
1622 ILLEGAL; /* only usable in supervisor mode */
1623 }
1624 if (UCOP_SET(18) == 0) { /* pc reg */
1625 user = 1;
1626 }
1627 }
1628
1629 addr = load_reg(s, UCOP_REG_N);
1630
1631 /* compute total size */
1632 loaded_base = 0;
1633 TCGV_UNUSED(loaded_var);
1634 n = 0;
1635 for (i = 0; i < 6; i++) {
1636 if (UCOP_SET(i)) {
1637 n++;
1638 }
1639 }
1640 for (i = 9; i < 19; i++) {
1641 if (UCOP_SET(i)) {
1642 n++;
1643 }
1644 }
1645 /* XXX: test invalid n == 0 case ? */
1646 if (UCOP_SET_U) {
1647 if (UCOP_SET_P) {
1648 /* pre increment */
1649 tcg_gen_addi_i32(addr, addr, 4);
1650 } else {
1651 /* post increment */
1652 }
1653 } else {
1654 if (UCOP_SET_P) {
1655 /* pre decrement */
1656 tcg_gen_addi_i32(addr, addr, -(n * 4));
1657 } else {
1658 /* post decrement */
1659 if (n != 1) {
1660 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1661 }
1662 }
1663 }
1664
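/* Register-list layout, as implied by the loops in this function:
 * bits [5:0] select the first six registers of the bank and bits [18:9]
 * the remaining ten, with bit 6 choosing between the low bank (r0-r15)
 * and the high bank (r16-r31); bits 7 and 8 are not part of the list
 * (bit 7 must be clear, see the ILLEGAL check above). */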
1665 j = 0;
1666 reg = UCOP_SET(6) ? 16 : 0;
1667 for (i = 0; i < 19; i++, reg++) {
1668 if (i == 6) {
1669 i = i + 3;
1670 }
1671 if (UCOP_SET(i)) {
1672 if (UCOP_SET_L) { /* load */
1673 tmp = gen_ld32(addr, IS_USER(s));
1674 if (reg == 31) {
1675 gen_bx(s, tmp);
1676 } else if (user) {
1677 tmp2 = tcg_const_i32(reg);
1678 gen_helper_set_user_reg(tmp2, tmp);
1679 tcg_temp_free_i32(tmp2);
1680 dead_tmp(tmp);
1681 } else if (reg == UCOP_REG_N) {
1682 loaded_var = tmp;
1683 loaded_base = 1;
1684 } else {
1685 store_reg(s, reg, tmp);
1686 }
1687 } else { /* store */
1688 if (reg == 31) {
1689 /* special case: r31 = PC + 4 */
1690 val = (long)s->pc;
1691 tmp = new_tmp();
1692 tcg_gen_movi_i32(tmp, val);
1693 } else if (user) {
1694 tmp = new_tmp();
1695 tmp2 = tcg_const_i32(reg);
1696 gen_helper_get_user_reg(tmp, tmp2);
1697 tcg_temp_free_i32(tmp2);
1698 } else {
1699 tmp = load_reg(s, reg);
1700 }
1701 gen_st32(tmp, addr, IS_USER(s));
1702 }
1703 j++;
1704 /* no need to add after the last transfer */
1705 if (j != n) {
1706 tcg_gen_addi_i32(addr, addr, 4);
1707 }
1708 }
1709 }
1710 if (UCOP_SET_W) { /* write back */
1711 if (UCOP_SET_U) {
1712 if (UCOP_SET_P) {
1713 /* pre increment */
1714 } else {
1715 /* post increment */
1716 tcg_gen_addi_i32(addr, addr, 4);
1717 }
1718 } else {
1719 if (UCOP_SET_P) {
1720 /* pre decrement */
1721 if (n != 1) {
1722 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1723 }
1724 } else {
1725 /* post decrement */
1726 tcg_gen_addi_i32(addr, addr, -(n * 4));
1727 }
1728 }
1729 store_reg(s, UCOP_REG_N, addr);
1730 } else {
1731 dead_tmp(addr);
1732 }
1733 if (loaded_base) {
1734 store_reg(s, UCOP_REG_N, loaded_var);
1735 }
1736 if (UCOP_SET_B && !user) {
1737 /* Restore ASR from BSR. */
1738 tmp = load_cpu_field(bsr);
1739 gen_set_asr(tmp, 0xffffffff);
1740 dead_tmp(tmp);
1741 s->is_jmp = DISAS_UPDATE;
1742 }
1743 }
1744
1745 /* branch (and link) */
1746 static void do_branch(CPUState *env, DisasContext *s, uint32_t insn)
1747 {
1748 unsigned int val;
1749 int32_t offset;
1750 TCGv tmp;
1751
1752 if (UCOP_COND == 0xf) {
1753 ILLEGAL;
1754 }
1755
1756 if (UCOP_COND != 0xe) {
1757 /* if not always executed, generate a conditional jump to the
1758    next instruction */
1759 s->condlabel = gen_new_label();
1760 gen_test_cc(UCOP_COND ^ 1, s->condlabel);
1761 s->condjmp = 1;
1762 }
1763
1764 val = (int32_t)s->pc;
1765 if (UCOP_SET_L) {
1766 tmp = new_tmp();
1767 tcg_gen_movi_i32(tmp, val);
1768 store_reg(s, 30, tmp);
1769 }
1770 offset = (((int32_t)insn << 8) >> 8);
1771 val += (offset << 2); /* unicore is pc+4 */
1772 gen_jmp(s, val);
1773 }
1774
1775 static void disas_uc32_insn(CPUState *env, DisasContext *s)
1776 {
1777 unsigned int insn;
1778
1779 insn = ldl_code(s->pc);
1780 s->pc += 4;
1781
1782 /* UniCore instructions class:
1783 * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
1784 * AAA : see switch case
1785 * BBBB : opcodes or cond or PUBW
1786 * C : S OR L
1787  * D    : bit 8
1788  * E    : bit 5
1789 */
1790 switch (insn >> 29) {
1791 case 0x0:
1792 if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
1793 do_mult(env, s, insn);
1794 break;
1795 }
1796
1797 if (UCOP_SET(8)) {
1798 do_misc(env, s, insn);
1799 break;
1800 }
1801 case 0x1:
1802 if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
1803 do_misc(env, s, insn);
1804 break;
1805 }
1806 do_datap(env, s, insn);
1807 break;
1808
1809 case 0x2:
1810 if (UCOP_SET(8) && UCOP_SET(5)) {
1811 do_ldst_hwsb(env, s, insn);
1812 break;
1813 }
1814 if (UCOP_SET(8) || UCOP_SET(5)) {
1815 ILLEGAL;
1816 }
1817 case 0x3:
1818 do_ldst_ir(env, s, insn);
1819 break;
1820
1821 case 0x4:
1822 if (UCOP_SET(8)) {
1823 ILLEGAL; /* extended instructions */
1824 }
1825 do_ldst_m(env, s, insn);
1826 break;
1827 case 0x5:
1828 do_branch(env, s, insn);
1829 break;
1830 case 0x6:
1831 /* Coprocessor. */
1832 disas_coproc_insn(env, s, insn);
1833 break;
1834 case 0x7:
1835 if (!UCOP_SET(28)) {
1836 disas_coproc_insn(env, s, insn);
1837 break;
1838 }
1839 if ((insn & 0xff000000) == 0xff000000) { /* syscall */
1840 gen_set_pc_im(s->pc);
1841 s->is_jmp = DISAS_SYSCALL;
1842 break;
1843 }
1844 ILLEGAL;
1845 }
1846
1847 return;
1848 }
1849
1850 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
1851 basic block 'tb'. If search_pc is TRUE, also generate PC
1852 information for each intermediate instruction. */
1853 static inline void gen_intermediate_code_internal(CPUState *env,
1854 TranslationBlock *tb, int search_pc)
1855 {
1856 DisasContext dc1, *dc = &dc1;
1857 CPUBreakpoint *bp;
1858 uint16_t *gen_opc_end;
1859 int j, lj;
1860 target_ulong pc_start;
1861 uint32_t next_page_start;
1862 int num_insns;
1863 int max_insns;
1864
1865 /* generate intermediate code */
1866 num_temps = 0;
1867
1868 pc_start = tb->pc;
1869
1870 dc->tb = tb;
1871
1872 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1873
1874 dc->is_jmp = DISAS_NEXT;
1875 dc->pc = pc_start;
1876 dc->singlestep_enabled = env->singlestep_enabled;
1877 dc->condjmp = 0;
1878 cpu_F0s = tcg_temp_new_i32();
1879 cpu_F1s = tcg_temp_new_i32();
1880 cpu_F0d = tcg_temp_new_i64();
1881 cpu_F1d = tcg_temp_new_i64();
1882 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1883 lj = -1;
1884 num_insns = 0;
1885 max_insns = tb->cflags & CF_COUNT_MASK;
1886 if (max_insns == 0) {
1887 max_insns = CF_COUNT_MASK;
1888 }
1889
1890 gen_icount_start();
1891 do {
1892 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1893 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1894 if (bp->pc == dc->pc) {
1895 gen_set_pc_im(dc->pc);
1896 gen_exception(EXCP_DEBUG);
1897 dc->is_jmp = DISAS_JUMP;
1898 /* Advance PC so that clearing the breakpoint will
1899 invalidate this TB. */
1900 dc->pc += 2; /* FIXME */
1901 goto done_generating;
1902 break;
1903 }
1904 }
1905 }
1906 if (search_pc) {
1907 j = gen_opc_ptr - gen_opc_buf;
1908 if (lj < j) {
1909 lj++;
1910 while (lj < j) {
1911 gen_opc_instr_start[lj++] = 0;
1912 }
1913 }
1914 gen_opc_pc[lj] = dc->pc;
1915 gen_opc_instr_start[lj] = 1;
1916 gen_opc_icount[lj] = num_insns;
1917 }
1918
1919 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
1920 gen_io_start();
1921 }
1922
1923 disas_uc32_insn(env, dc);
1924
1925 if (num_temps) {
1926 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
1927 num_temps = 0;
1928 }
1929
1930 if (dc->condjmp && !dc->is_jmp) {
1931 gen_set_label(dc->condlabel);
1932 dc->condjmp = 0;
1933 }
1934 /* Translation stops when a conditional branch is encountered.
1935 * Otherwise the subsequent code could get translated several times.
1936 * Also stop translation when a page boundary is reached. This
1937 * ensures prefetch aborts occur at the right place. */
1938 num_insns++;
1939 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
1940 !env->singlestep_enabled &&
1941 !singlestep &&
1942 dc->pc < next_page_start &&
1943 num_insns < max_insns);
1944
1945 if (tb->cflags & CF_LAST_IO) {
1946 if (dc->condjmp) {
1947 /* FIXME: This can theoretically happen with self-modifying
1948 code. */
1949 cpu_abort(env, "IO on conditional branch instruction");
1950 }
1951 gen_io_end();
1952 }
1953
1954 /* At this stage dc->condjmp will only be set when the skipped
1955 instruction was a conditional branch or trap, and the PC has
1956 already been written. */
1957 if (unlikely(env->singlestep_enabled)) {
1958 /* Make sure the pc is updated, and raise a debug exception. */
1959 if (dc->condjmp) {
1960 if (dc->is_jmp == DISAS_SYSCALL) {
1961 gen_exception(UC32_EXCP_PRIV);
1962 } else {
1963 gen_exception(EXCP_DEBUG);
1964 }
1965 gen_set_label(dc->condlabel);
1966 }
1967 if (dc->condjmp || !dc->is_jmp) {
1968 gen_set_pc_im(dc->pc);
1969 dc->condjmp = 0;
1970 }
1971 if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
1972 gen_exception(UC32_EXCP_PRIV);
1973 } else {
1974 gen_exception(EXCP_DEBUG);
1975 }
1976 } else {
1977 /* While branches must always occur at the end of a TB, there are
1978    a few other things that can cause us to terminate the TB in the
1979    middle of it:
1980 - Exception generating instructions (bkpt, swi, undefined).
1981 - Page boundaries.
1982 - Hardware watchpoints.
1983 Hardware breakpoints have already been handled and skip this code.
1984 */
1985 switch (dc->is_jmp) {
1986 case DISAS_NEXT:
1987 gen_goto_tb(dc, 1, dc->pc);
1988 break;
1989 default:
1990 case DISAS_JUMP:
1991 case DISAS_UPDATE:
1992 /* indicate that the hash table must be used to find the next TB */
1993 tcg_gen_exit_tb(0);
1994 break;
1995 case DISAS_TB_JUMP:
1996 /* nothing more to generate */
1997 break;
1998 case DISAS_SYSCALL:
1999 gen_exception(UC32_EXCP_PRIV);
2000 break;
2001 }
2002 if (dc->condjmp) {
2003 gen_set_label(dc->condlabel);
2004 gen_goto_tb(dc, 1, dc->pc);
2005 dc->condjmp = 0;
2006 }
2007 }
2008
2009 done_generating:
2010 gen_icount_end(tb, num_insns);
2011 *gen_opc_ptr = INDEX_op_end;
2012
2013 #ifdef DEBUG_DISAS
2014 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2015 qemu_log("----------------\n");
2016 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2017 log_target_disas(pc_start, dc->pc - pc_start, 0);
2018 qemu_log("\n");
2019 }
2020 #endif
2021 if (search_pc) {
2022 j = gen_opc_ptr - gen_opc_buf;
2023 lj++;
2024 while (lj <= j) {
2025 gen_opc_instr_start[lj++] = 0;
2026 }
2027 } else {
2028 tb->size = dc->pc - pc_start;
2029 tb->icount = num_insns;
2030 }
2031 }
2032
2033 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2034 {
2035 gen_intermediate_code_internal(env, tb, 0);
2036 }
2037
2038 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2039 {
2040 gen_intermediate_code_internal(env, tb, 1);
2041 }
2042
2043 static const char *cpu_mode_names[16] = {
2044 "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
2045 "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
2046 };
2047
2048 #define UCF64_DUMP_STATE
2049 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
2050 int flags)
2051 {
2052 int i;
2053 #ifdef UCF64_DUMP_STATE
2054 union {
2055 uint32_t i;
2056 float s;
2057 } s0, s1;
2058 CPU_DoubleU d;
2059 /* ??? This assumes float64 and double have the same layout.
2060 Oh well, it's only debug dumps. */
2061 union {
2062 float64 f64;
2063 double d;
2064 } d0;
2065 #endif
2066 uint32_t psr;
2067
2068 for (i = 0; i < 32; i++) {
2069 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2070 if ((i % 4) == 3) {
2071 cpu_fprintf(f, "\n");
2072 } else {
2073 cpu_fprintf(f, " ");
2074 }
2075 }
2076 psr = cpu_asr_read(env);
2077 cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
2078 psr,
2079 psr & (1 << 31) ? 'N' : '-',
2080 psr & (1 << 30) ? 'Z' : '-',
2081 psr & (1 << 29) ? 'C' : '-',
2082 psr & (1 << 28) ? 'V' : '-',
2083 cpu_mode_names[psr & 0xf]);
2084
2085 #ifdef UCF64_DUMP_STATE
2086 for (i = 0; i < 16; i++) {
2087 d.d = env->ucf64.regs[i];
2088 s0.i = d.l.lower;
2089 s1.i = d.l.upper;
2090 d0.f64 = d.d;
2091 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%" PRIx64 "(%8g)\n",
2092 i * 2, (int)s0.i, s0.s,
2093 i * 2 + 1, (int)s1.i, s1.s,
2094 i, (uint64_t)d0.f64, d0.d);
2095 }
2096 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
2097 #endif
2098 }
2099
2100 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
2101 {
2102 env->regs[31] = gen_opc_pc[pc_pos];
2103 }