target/microblaze: Convert dec_add to decodetree
target/microblaze/translate.c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36 #define EXTRACT_FIELD(src, start, end) \
37 (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
38
39 /* is_jmp field values */
40 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
41 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
42
43 static TCGv_i32 cpu_R[32];
44 static TCGv_i32 cpu_pc;
45 static TCGv_i32 cpu_msr;
46 static TCGv_i32 cpu_msr_c;
47 static TCGv_i32 cpu_imm;
48 static TCGv_i32 cpu_btaken;
49 static TCGv_i32 cpu_btarget;
50 static TCGv_i32 cpu_iflags;
51 static TCGv cpu_res_addr;
52 static TCGv_i32 cpu_res_val;
53
54 #include "exec/gen-icount.h"
55
56 /* This is the state at translation time. */
57 typedef struct DisasContext {
58 DisasContextBase base;
59 MicroBlazeCPU *cpu;
60
61 TCGv_i32 r0;
62 bool r0_set;
63
64 /* Decoder. */
65 int type_b;
66 uint32_t ir;
67 uint32_t ext_imm;
68 uint8_t opcode;
69 uint8_t rd, ra, rb;
70 uint16_t imm;
71
72 unsigned int cpustate_changed;
73 unsigned int delayed_branch;
74 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
75 unsigned int clear_imm;
76
77 #define JMP_NOJMP 0
78 #define JMP_DIRECT 1
79 #define JMP_DIRECT_CC 2
80 #define JMP_INDIRECT 3
81 unsigned int jmp;
82 uint32_t jmp_pc;
83
84 int abort_at_next_insn;
85 } DisasContext;
86
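/*
 * With an IMM prefix the 16-bit type-B immediate is only the low half of
 * the constant: e.g. "imm 0x1234" followed by "addik r5, r0, 0x5678"
 * yields 0x12345678, because dec_imm() latched 0x12340000 in ext_imm and
 * typeb_imm() deposits the low 16 bits from the following insn.
 */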
87 static int typeb_imm(DisasContext *dc, int x)
88 {
89 if (dc->tb_flags & IMM_FLAG) {
90 return deposit32(dc->ext_imm, 0, 16, x);
91 }
92 return x;
93 }
94
95 /* Include the auto-generated decoder. */
96 #include "decode-insns.c.inc"
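/*
 * decode() comes from the include above; it is generated by
 * scripts/decodetree.py from the patterns in insns.decode and dispatches
 * to the trans_*() callbacks defined further down in this file.
 */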
97
98 static inline void t_sync_flags(DisasContext *dc)
99 {
100 /* Synch the tb dependent flags between translator and runtime. */
101 if (dc->tb_flags != dc->synced_flags) {
102 tcg_gen_movi_i32(cpu_iflags, dc->tb_flags);
103 dc->synced_flags = dc->tb_flags;
104 }
105 }
106
107 static void gen_raise_exception(DisasContext *dc, uint32_t index)
108 {
109 TCGv_i32 tmp = tcg_const_i32(index);
110
111 gen_helper_raise_exception(cpu_env, tmp);
112 tcg_temp_free_i32(tmp);
113 dc->base.is_jmp = DISAS_NORETURN;
114 }
115
116 static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
117 {
118 t_sync_flags(dc);
119 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
120 gen_raise_exception(dc, index);
121 }
122
123 static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
124 {
125 TCGv_i32 tmp = tcg_const_i32(esr_ec);
126 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
127 tcg_temp_free_i32(tmp);
128
129 gen_raise_exception_sync(dc, EXCP_HW_EXCP);
130 }
131
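/*
 * Direct block chaining (goto_tb) is only used when the destination lies
 * on the same guest page as the start of this TB; otherwise, or when
 * single-stepping, we exit to the main loop with cpu_pc updated.
 */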
132 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
133 {
134 #ifndef CONFIG_USER_ONLY
135 return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
136 #else
137 return true;
138 #endif
139 }
140
141 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
142 {
143 if (dc->base.singlestep_enabled) {
144 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
145 tcg_gen_movi_i32(cpu_pc, dest);
146 gen_helper_raise_exception(cpu_env, tmp);
147 tcg_temp_free_i32(tmp);
148 } else if (use_goto_tb(dc, dest)) {
149 tcg_gen_goto_tb(n);
150 tcg_gen_movi_i32(cpu_pc, dest);
151 tcg_gen_exit_tb(dc->base.tb, n);
152 } else {
153 tcg_gen_movi_i32(cpu_pc, dest);
154 tcg_gen_exit_tb(NULL, 0);
155 }
156 dc->base.is_jmp = DISAS_NORETURN;
157 }
158
159 /*
160 * Returns true if the insn is an illegal operation.
161 * If exceptions are enabled, an exception is raised.
162 */
163 static bool trap_illegal(DisasContext *dc, bool cond)
164 {
165 if (cond && (dc->tb_flags & MSR_EE_FLAG)
166 && dc->cpu->cfg.illegal_opcode_exception) {
167 gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
168 }
169 return cond;
170 }
171
172 /*
173 * Returns true if the insn is illegal in userspace.
174 * If exceptions are enabled, an exception is raised.
175 */
176 static bool trap_userspace(DisasContext *dc, bool cond)
177 {
178 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
179 bool cond_user = cond && mem_index == MMU_USER_IDX;
180
181 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
182 gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
183 }
184 return cond_user;
185 }
186
187 static int32_t dec_alu_typeb_imm(DisasContext *dc)
188 {
189 tcg_debug_assert(dc->type_b);
190 return typeb_imm(dc, (int16_t)dc->imm);
191 }
192
193 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
194 {
195 if (dc->type_b) {
196 tcg_gen_movi_i32(cpu_imm, dec_alu_typeb_imm(dc));
197 return &cpu_imm;
198 }
199 return &cpu_R[dc->rb];
200 }
201
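/*
 * MicroBlaze r0 always reads as zero and writes to it are discarded.
 * Reads of r0 therefore get a zero-initialised temporary and writes go
 * to a scratch temporary; the temp is freed after each insn in
 * mb_tr_translate_insn().
 */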
202 static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
203 {
204 if (likely(reg != 0)) {
205 return cpu_R[reg];
206 }
207 if (!dc->r0_set) {
208 if (dc->r0 == NULL) {
209 dc->r0 = tcg_temp_new_i32();
210 }
211 tcg_gen_movi_i32(dc->r0, 0);
212 dc->r0_set = true;
213 }
214 return dc->r0;
215 }
216
217 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
218 {
219 if (likely(reg != 0)) {
220 return cpu_R[reg];
221 }
222 if (dc->r0 == NULL) {
223 dc->r0 = tcg_temp_new_i32();
224 }
225 return dc->r0;
226 }
227
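/*
 * Common expander for type-A (register-register) insns.  When rd is r0
 * and the operation has no side effects beyond the register write
 * (side_effects == false, e.g. addk), the insn is a nop and nothing is
 * emitted; the carry-writing variants pass side_effects == true.
 */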
228 static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
229 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
230 {
231 TCGv_i32 rd, ra, rb;
232
233 if (arg->rd == 0 && !side_effects) {
234 return true;
235 }
236
237 rd = reg_for_write(dc, arg->rd);
238 ra = reg_for_read(dc, arg->ra);
239 rb = reg_for_read(dc, arg->rb);
240 fn(rd, ra, rb);
241 return true;
242 }
243
244 static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
245 void (*fni)(TCGv_i32, TCGv_i32, int32_t))
246 {
247 TCGv_i32 rd, ra;
248
249 if (arg->rd == 0 && !side_effects) {
250 return true;
251 }
252
253 rd = reg_for_write(dc, arg->rd);
254 ra = reg_for_read(dc, arg->ra);
255 fni(rd, ra, arg->imm);
256 return true;
257 }
258
259 static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
260 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
261 {
262 TCGv_i32 rd, ra, imm;
263
264 if (arg->rd == 0 && !side_effects) {
265 return true;
266 }
267
268 rd = reg_for_write(dc, arg->rd);
269 ra = reg_for_read(dc, arg->ra);
270 imm = tcg_const_i32(arg->imm);
271
272 fn(rd, ra, imm);
273
274 tcg_temp_free_i32(imm);
275 return true;
276 }
277
278 #define DO_TYPEA(NAME, SE, FN) \
279 static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
280 { return do_typea(dc, a, SE, FN); }
281
282 #define DO_TYPEBI(NAME, SE, FNI) \
283 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
284 { return do_typeb_imm(dc, a, SE, FNI); }
285
286 #define DO_TYPEBV(NAME, SE, FN) \
287 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
288 { return do_typeb_val(dc, a, SE, FN); }
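/*
 * For example, DO_TYPEA(add, true, gen_add) expands to a trans_add()
 * callback, which the generated decode() invokes for the matching bit
 * pattern; the last macro argument supplies the TCG emitter to use.
 */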
289
290 /* No input carry, but output carry. */
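/*
 * With both high halves forced to zero, add2 leaves the carry out of
 * ina + inb in the high result word, which lands directly in cpu_msr_c.
 */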
291 static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
292 {
293 TCGv_i32 zero = tcg_const_i32(0);
294
295 tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
296
297 tcg_temp_free_i32(zero);
298 }
299
300 /* Input and output carry. */
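/*
 * cpu_msr_c holds the carry as 0 or 1.  The first add2 folds the incoming
 * carry into ina, the second adds inb; the two partial carries can never
 * both be set, so the high result word is again a clean 0/1 carry flag.
 */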
301 static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
302 {
303 TCGv_i32 zero = tcg_const_i32(0);
304 TCGv_i32 tmp = tcg_temp_new_i32();
305
306 tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
307 tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
308
309 tcg_temp_free_i32(tmp);
310 tcg_temp_free_i32(zero);
311 }
312
313 /* Input carry, but no output carry. */
314 static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
315 {
316 tcg_gen_add_i32(out, ina, inb);
317 tcg_gen_add_i32(out, out, cpu_msr_c);
318 }
319
320 DO_TYPEA(add, true, gen_add)
321 DO_TYPEA(addc, true, gen_addc)
322 DO_TYPEA(addk, false, tcg_gen_add_i32)
323 DO_TYPEA(addkc, true, gen_addkc)
324
325 DO_TYPEBV(addi, true, gen_add)
326 DO_TYPEBV(addic, true, gen_addc)
327 DO_TYPEBI(addik, false, tcg_gen_addi_i32)
328 DO_TYPEBV(addikc, true, gen_addkc)
329
330 static bool trans_zero(DisasContext *dc, arg_zero *arg)
331 {
332 /* If opcode_0_illegal, trap. */
333 if (dc->cpu->cfg.opcode_0_illegal) {
334 trap_illegal(dc, true);
335 return true;
336 }
337 /*
338 * Otherwise, this is "add r0, r0, r0".
339 * Continue to trans_add so that MSR[C] gets cleared.
340 */
341 return false;
342 }
343
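/*
 * Everything from dec_sub() down is still handled by the legacy table
 * decoder (old_decode() near the bottom of the file); so far only the
 * add/addi family above has been converted to the decodetree decoder.
 */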
344 static void dec_sub(DisasContext *dc)
345 {
346 unsigned int u, cmp, k, c;
347 TCGv_i32 cf, na;
348
349 u = dc->imm & 2;
350 k = dc->opcode & 4;
351 c = dc->opcode & 2;
352 cmp = (dc->imm & 1) && (!dc->type_b) && k;
353
354 if (cmp) {
355 if (dc->rd) {
356 if (u)
357 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
358 else
359 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
360 }
361 return;
362 }
363
364 /* Take care of the easy cases first. */
365 if (k) {
366 /* k - keep carry, no need to update MSR. */
367 /* If rd == r0, it's a nop. */
368 if (dc->rd) {
369 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
370
371 if (c) {
372 /* c - Add carry into the result. */
373 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_msr_c);
374 }
375 }
376 return;
377 }
378
379 /* From now on, we can assume k is zero. So we need to update MSR. */
380 /* Extract carry. And complement a into na. */
381 cf = tcg_temp_new_i32();
382 na = tcg_temp_new_i32();
383 if (c) {
384 tcg_gen_mov_i32(cf, cpu_msr_c);
385 } else {
386 tcg_gen_movi_i32(cf, 1);
387 }
388
389 /* d = b + ~a + c. carry defaults to 1. */
390 tcg_gen_not_i32(na, cpu_R[dc->ra]);
391
392 gen_helper_carry(cpu_msr_c, na, *(dec_alu_op_b(dc)), cf);
393 if (dc->rd) {
394 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
395 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
396 }
397 tcg_temp_free_i32(cf);
398 tcg_temp_free_i32(na);
399 }
400
401 static void dec_pattern(DisasContext *dc)
402 {
403 unsigned int mode;
404
405 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
406 return;
407 }
408
409 mode = dc->opcode & 3;
410 switch (mode) {
411 case 0:
412 /* pcmpbf. */
413 if (dc->rd)
414 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
415 break;
416 case 2:
417 if (dc->rd) {
418 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
419 cpu_R[dc->ra], cpu_R[dc->rb]);
420 }
421 break;
422 case 3:
423 if (dc->rd) {
424 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
425 cpu_R[dc->ra], cpu_R[dc->rb]);
426 }
427 break;
428 default:
429 cpu_abort(CPU(dc->cpu),
430 "unsupported pattern insn opcode=%x\n", dc->opcode);
431 break;
432 }
433 }
434
435 static void dec_and(DisasContext *dc)
436 {
437 unsigned int not;
438
439 if (!dc->type_b && (dc->imm & (1 << 10))) {
440 dec_pattern(dc);
441 return;
442 }
443
444 not = dc->opcode & (1 << 1);
445
446 if (!dc->rd)
447 return;
448
449 if (not) {
450 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
451 } else
452 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
453 }
454
455 static void dec_or(DisasContext *dc)
456 {
457 if (!dc->type_b && (dc->imm & (1 << 10))) {
458 dec_pattern(dc);
459 return;
460 }
461
462 if (dc->rd)
463 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
464 }
465
466 static void dec_xor(DisasContext *dc)
467 {
468 if (!dc->type_b && (dc->imm & (1 << 10))) {
469 dec_pattern(dc);
470 return;
471 }
472
473 if (dc->rd)
474 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
475 }
476
477 static void msr_read(DisasContext *dc, TCGv_i32 d)
478 {
479 TCGv_i32 t;
480
481 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
482 t = tcg_temp_new_i32();
483 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
484 tcg_gen_or_i32(d, cpu_msr, t);
485 tcg_temp_free_i32(t);
486 }
487
488 static void msr_write(DisasContext *dc, TCGv_i32 v)
489 {
490 dc->cpustate_changed = 1;
491
492 /* Install MSR_C. */
493 tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);
494
495 /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
496 tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
497 }
498
499 static void dec_msr(DisasContext *dc)
500 {
501 CPUState *cs = CPU(dc->cpu);
502 TCGv_i32 t0, t1;
503 unsigned int sr, rn;
504 bool to, clrset, extended = false;
505
506 sr = extract32(dc->imm, 0, 14);
507 to = extract32(dc->imm, 14, 1);
508 clrset = extract32(dc->imm, 15, 1) == 0;
509 dc->type_b = 1;
510 if (to) {
511 dc->cpustate_changed = 1;
512 }
513
514 /* Extended MSRs are only available if addr_size > 32. */
515 if (dc->cpu->cfg.addr_size > 32) {
516 /* The E-bit is encoded differently for To/From MSR. */
517 static const unsigned int e_bit[] = { 19, 24 };
518
519 extended = extract32(dc->imm, e_bit[to], 1);
520 }
521
522 /* msrclr and msrset. */
523 if (clrset) {
524 bool clr = extract32(dc->ir, 16, 1);
525
526 if (!dc->cpu->cfg.use_msr_instr) {
527 /* nop??? */
528 return;
529 }
530
531 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
532 return;
533 }
534
535 if (dc->rd)
536 msr_read(dc, cpu_R[dc->rd]);
537
538 t0 = tcg_temp_new_i32();
539 t1 = tcg_temp_new_i32();
540 msr_read(dc, t0);
541 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
542
543 if (clr) {
544 tcg_gen_not_i32(t1, t1);
545 tcg_gen_and_i32(t0, t0, t1);
546 } else
547 tcg_gen_or_i32(t0, t0, t1);
548 msr_write(dc, t0);
549 tcg_temp_free_i32(t0);
550 tcg_temp_free_i32(t1);
551 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
552 dc->base.is_jmp = DISAS_UPDATE;
553 return;
554 }
555
556 if (trap_userspace(dc, to)) {
557 return;
558 }
559
560 #if !defined(CONFIG_USER_ONLY)
561 /* Catch read/writes to the mmu block. */
562 if ((sr & ~0xff) == 0x1000) {
563 TCGv_i32 tmp_ext = tcg_const_i32(extended);
564 TCGv_i32 tmp_sr;
565
566 sr &= 7;
567 tmp_sr = tcg_const_i32(sr);
568 if (to) {
569 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
570 } else {
571 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
572 }
573 tcg_temp_free_i32(tmp_sr);
574 tcg_temp_free_i32(tmp_ext);
575 return;
576 }
577 #endif
578
579 if (to) {
580 switch (sr) {
581 case SR_PC:
582 break;
583 case SR_MSR:
584 msr_write(dc, cpu_R[dc->ra]);
585 break;
586 case SR_EAR:
587 {
588 TCGv_i64 t64 = tcg_temp_new_i64();
589 tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
590 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
591 tcg_temp_free_i64(t64);
592 }
593 break;
594 case SR_ESR:
595 tcg_gen_st_i32(cpu_R[dc->ra],
596 cpu_env, offsetof(CPUMBState, esr));
597 break;
598 case SR_FSR:
599 tcg_gen_st_i32(cpu_R[dc->ra],
600 cpu_env, offsetof(CPUMBState, fsr));
601 break;
602 case SR_BTR:
603 tcg_gen_st_i32(cpu_R[dc->ra],
604 cpu_env, offsetof(CPUMBState, btr));
605 break;
606 case SR_EDR:
607 tcg_gen_st_i32(cpu_R[dc->ra],
608 cpu_env, offsetof(CPUMBState, edr));
609 break;
610 case 0x800:
611 tcg_gen_st_i32(cpu_R[dc->ra],
612 cpu_env, offsetof(CPUMBState, slr));
613 break;
614 case 0x802:
615 tcg_gen_st_i32(cpu_R[dc->ra],
616 cpu_env, offsetof(CPUMBState, shr));
617 break;
618 default:
619 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
620 break;
621 }
622 } else {
623 switch (sr) {
624 case SR_PC:
625 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
626 break;
627 case SR_MSR:
628 msr_read(dc, cpu_R[dc->rd]);
629 break;
630 case SR_EAR:
631 {
632 TCGv_i64 t64 = tcg_temp_new_i64();
633 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
634 if (extended) {
635 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
636 } else {
637 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
638 }
639 tcg_temp_free_i64(t64);
640 }
641 break;
642 case SR_ESR:
643 tcg_gen_ld_i32(cpu_R[dc->rd],
644 cpu_env, offsetof(CPUMBState, esr));
645 break;
646 case SR_FSR:
647 tcg_gen_ld_i32(cpu_R[dc->rd],
648 cpu_env, offsetof(CPUMBState, fsr));
649 break;
650 case SR_BTR:
651 tcg_gen_ld_i32(cpu_R[dc->rd],
652 cpu_env, offsetof(CPUMBState, btr));
653 break;
654 case SR_EDR:
655 tcg_gen_ld_i32(cpu_R[dc->rd],
656 cpu_env, offsetof(CPUMBState, edr));
657 break;
658 case 0x800:
659 tcg_gen_ld_i32(cpu_R[dc->rd],
660 cpu_env, offsetof(CPUMBState, slr));
661 break;
662 case 0x802:
663 tcg_gen_ld_i32(cpu_R[dc->rd],
664 cpu_env, offsetof(CPUMBState, shr));
665 break;
666 case 0x2000 ... 0x200c:
667 rn = sr & 0xf;
668 tcg_gen_ld_i32(cpu_R[dc->rd],
669 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
670 break;
671 default:
672 cpu_abort(cs, "unknown mfs reg %x\n", sr);
673 break;
674 }
675 }
676
677 if (dc->rd == 0) {
678 tcg_gen_movi_i32(cpu_R[0], 0);
679 }
680 }
681
682 /* Multiplier unit. */
683 static void dec_mul(DisasContext *dc)
684 {
685 TCGv_i32 tmp;
686 unsigned int subcode;
687
688 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
689 return;
690 }
691
692 subcode = dc->imm & 3;
693
694 if (dc->type_b) {
695 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
696 return;
697 }
698
699 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
700 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
701 /* nop??? */
702 }
703
704 tmp = tcg_temp_new_i32();
705 switch (subcode) {
706 case 0:
707 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
708 break;
709 case 1:
710 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
711 cpu_R[dc->ra], cpu_R[dc->rb]);
712 break;
713 case 2:
714 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
715 cpu_R[dc->ra], cpu_R[dc->rb]);
716 break;
717 case 3:
718 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
719 break;
720 default:
721 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
722 break;
723 }
724 tcg_temp_free_i32(tmp);
725 }
726
727 /* Div unit. */
728 static void dec_div(DisasContext *dc)
729 {
730 unsigned int u;
731
732 u = dc->imm & 2;
733
734 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
735 return;
736 }
737
738 if (u)
739 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
740 cpu_R[dc->ra]);
741 else
742 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
743 cpu_R[dc->ra]);
744 if (!dc->rd)
745 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
746 }
747
748 static void dec_barrel(DisasContext *dc)
749 {
750 TCGv_i32 t0;
751 unsigned int imm_w, imm_s;
752 bool s, t, e = false, i = false;
753
754 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
755 return;
756 }
757
758 if (dc->type_b) {
759 /* Insert and extract are only available in immediate mode. */
760 i = extract32(dc->imm, 15, 1);
761 e = extract32(dc->imm, 14, 1);
762 }
763 s = extract32(dc->imm, 10, 1);
764 t = extract32(dc->imm, 9, 1);
765 imm_w = extract32(dc->imm, 6, 5);
766 imm_s = extract32(dc->imm, 0, 5);
767
768 if (e) {
769 if (imm_w + imm_s > 32 || imm_w == 0) {
770 /* These inputs have an undefined behavior. */
771 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
772 imm_w, imm_s);
773 } else {
774 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
775 }
776 } else if (i) {
777 int width = imm_w - imm_s + 1;
778
779 if (imm_w < imm_s) {
780 /* These inputs have an undefined behavior. */
781 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
782 imm_w, imm_s);
783 } else {
784 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
785 imm_s, width);
786 }
787 } else {
788 t0 = tcg_temp_new_i32();
789
790 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
791 tcg_gen_andi_i32(t0, t0, 31);
792
793 if (s) {
794 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
795 } else {
796 if (t) {
797 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
798 } else {
799 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
800 }
801 }
802 tcg_temp_free_i32(t0);
803 }
804 }
805
806 static void dec_bit(DisasContext *dc)
807 {
808 CPUState *cs = CPU(dc->cpu);
809 TCGv_i32 t0;
810 unsigned int op;
811
812 op = dc->ir & ((1 << 9) - 1);
813 switch (op) {
814 case 0x21:
815 /* src. */
816 t0 = tcg_temp_new_i32();
817
818 tcg_gen_shli_i32(t0, cpu_msr_c, 31);
819 tcg_gen_andi_i32(cpu_msr_c, cpu_R[dc->ra], 1);
820 if (dc->rd) {
821 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
822 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
823 }
824 tcg_temp_free_i32(t0);
825 break;
826
827 case 0x1:
828 case 0x41:
829 /* srl. */
830 tcg_gen_andi_i32(cpu_msr_c, cpu_R[dc->ra], 1);
831 if (dc->rd) {
832 if (op == 0x41)
833 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
834 else
835 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
836 }
837 break;
838 case 0x60:
839 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
840 break;
841 case 0x61:
842 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
843 break;
844 case 0x64:
845 case 0x66:
846 case 0x74:
847 case 0x76:
848 /* wdc. */
849 trap_userspace(dc, true);
850 break;
851 case 0x68:
852 /* wic. */
853 trap_userspace(dc, true);
854 break;
855 case 0xe0:
856 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
857 return;
858 }
859 if (dc->cpu->cfg.use_pcmp_instr) {
860 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
861 }
862 break;
863 case 0x1e0:
864 /* swapb */
865 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
866 break;
867 case 0x1e2:
868 /* swaph */
869 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
870 break;
871 default:
872 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
873 (uint32_t)dc->base.pc_next, op, dc->rd, dc->ra, dc->rb);
874 break;
875 }
876 }
877
878 static inline void sync_jmpstate(DisasContext *dc)
879 {
880 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
881 if (dc->jmp == JMP_DIRECT) {
882 tcg_gen_movi_i32(cpu_btaken, 1);
883 }
884 dc->jmp = JMP_INDIRECT;
885 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
886 }
887 }
888
889 static void dec_imm(DisasContext *dc)
890 {
891 dc->ext_imm = dc->imm << 16;
892 tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
893 dc->tb_flags |= IMM_FLAG;
894 dc->clear_imm = 0;
895 }
896
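/*
 * Compute the effective address into t.  For extended-address (ea)
 * accesses the 64-bit address is rb in the low half and ra in the high
 * half, masked to the configured address size; otherwise it is ra + rb
 * (type A) or ra + imm (type B), with the stack-protection helper
 * invoked when r1 is involved and stack protection is configured.
 */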
897 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
898 {
899 /* Should be set to true if r1 is used by loadstores. */
900 bool stackprot = false;
901 TCGv_i32 t32;
902
903 /* All load/stores use ra. */
904 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
905 stackprot = true;
906 }
907
908 /* Treat the common cases first. */
909 if (!dc->type_b) {
910 if (ea) {
911 int addr_size = dc->cpu->cfg.addr_size;
912
913 if (addr_size == 32) {
914 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
915 return;
916 }
917
918 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
919 if (addr_size < 64) {
920 /* Mask off out of range bits. */
921 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
922 }
923 return;
924 }
925
926 /* If any of the regs is r0, set t to the value of the other reg. */
927 if (dc->ra == 0) {
928 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
929 return;
930 } else if (dc->rb == 0) {
931 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
932 return;
933 }
934
935 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
936 stackprot = true;
937 }
938
939 t32 = tcg_temp_new_i32();
940 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
941 tcg_gen_extu_i32_tl(t, t32);
942 tcg_temp_free_i32(t32);
943
944 if (stackprot) {
945 gen_helper_stackprot(cpu_env, t);
946 }
947 return;
948 }
949 /* Immediate. */
950 t32 = tcg_temp_new_i32();
951 tcg_gen_addi_i32(t32, cpu_R[dc->ra], dec_alu_typeb_imm(dc));
952 tcg_gen_extu_i32_tl(t, t32);
953 tcg_temp_free_i32(t32);
954
955 if (stackprot) {
956 gen_helper_stackprot(cpu_env, t);
957 }
958 return;
959 }
960
961 static void dec_load(DisasContext *dc)
962 {
963 TCGv_i32 v;
964 TCGv addr;
965 unsigned int size;
966 bool rev = false, ex = false, ea = false;
967 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
968 MemOp mop;
969
970 mop = dc->opcode & 3;
971 size = 1 << mop;
972 if (!dc->type_b) {
973 ea = extract32(dc->ir, 7, 1);
974 rev = extract32(dc->ir, 9, 1);
975 ex = extract32(dc->ir, 10, 1);
976 }
977 mop |= MO_TE;
978 if (rev) {
979 mop ^= MO_BSWAP;
980 }
981
982 if (trap_illegal(dc, size > 4)) {
983 return;
984 }
985
986 if (trap_userspace(dc, ea)) {
987 return;
988 }
989
990 t_sync_flags(dc);
991 addr = tcg_temp_new();
992 compute_ldst_addr(dc, ea, addr);
993 /* Extended addressing bypasses the MMU. */
994 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
995
996 /*
997 * When doing reverse accesses we need to do two things.
998 *
999 * 1. Reverse the address wrt endianness.
1000 * 2. Byteswap the data lanes on the way back into the CPU core.
1001 */
1002 if (rev && size != 4) {
1003 /* Endian reverse the address. t is addr. */
1004 switch (size) {
1005 case 1:
1006 {
1007 tcg_gen_xori_tl(addr, addr, 3);
1008 break;
1009 }
1010
1011 case 2:
1012 /* 00 -> 10
1013 10 -> 00. */
1014 tcg_gen_xori_tl(addr, addr, 2);
1015 break;
1016 default:
1017 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1018 break;
1019 }
1020 }
1021
1022 /* lwx does not throw unaligned access errors, so force alignment */
1023 if (ex) {
1024 tcg_gen_andi_tl(addr, addr, ~3);
1025 }
1026
1027 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1028 sync_jmpstate(dc);
1029
1030 /* Verify alignment if needed. */
1031 /*
1032 * Microblaze gives MMU faults priority over faults due to
1033 * unaligned addresses. That's why we speculatively do the load
1034 * into v. If the load succeeds, we verify alignment of the
1035 * address and if that succeeds we write into the destination reg.
1036 */
1037 v = tcg_temp_new_i32();
1038 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
1039
1040 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1041 TCGv_i32 t0 = tcg_const_i32(0);
1042 TCGv_i32 treg = tcg_const_i32(dc->rd);
1043 TCGv_i32 tsize = tcg_const_i32(size - 1);
1044
1045 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1046 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1047
1048 tcg_temp_free_i32(t0);
1049 tcg_temp_free_i32(treg);
1050 tcg_temp_free_i32(tsize);
1051 }
1052
1053 if (ex) {
1054 tcg_gen_mov_tl(cpu_res_addr, addr);
1055 tcg_gen_mov_i32(cpu_res_val, v);
1056 }
1057 if (dc->rd) {
1058 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1059 }
1060 tcg_temp_free_i32(v);
1061
1062 if (ex) { /* lwx */
1063 /* no support for AXI exclusive so always clear C */
1064 tcg_gen_movi_i32(cpu_msr_c, 0);
1065 }
1066
1067 tcg_temp_free(addr);
1068 }
1069
1070 static void dec_store(DisasContext *dc)
1071 {
1072 TCGv addr;
1073 TCGLabel *swx_skip = NULL;
1074 unsigned int size;
1075 bool rev = false, ex = false, ea = false;
1076 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1077 MemOp mop;
1078
1079 mop = dc->opcode & 3;
1080 size = 1 << mop;
1081 if (!dc->type_b) {
1082 ea = extract32(dc->ir, 7, 1);
1083 rev = extract32(dc->ir, 9, 1);
1084 ex = extract32(dc->ir, 10, 1);
1085 }
1086 mop |= MO_TE;
1087 if (rev) {
1088 mop ^= MO_BSWAP;
1089 }
1090
1091 if (trap_illegal(dc, size > 4)) {
1092 return;
1093 }
1094
1095 trap_userspace(dc, ea);
1096
1097 t_sync_flags(dc);
1098 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1099 sync_jmpstate(dc);
1100 /* SWX needs a temp_local. */
1101 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1102 compute_ldst_addr(dc, ea, addr);
1103 /* Extended addressing bypasses the MMU. */
1104 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1105
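/*
 * swx is the store-conditional half of the lwx/swx pair: MSR[C] is
 * preset to 1 (failure); the store only happens, and MSR[C] is only
 * cleared, if the reservation address matches and the cmpxchg still
 * sees the value observed by lwx.
 */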
1106 if (ex) { /* swx */
1107 TCGv_i32 tval;
1108
1109 /* swx does not throw unaligned access errors, so force alignment */
1110 tcg_gen_andi_tl(addr, addr, ~3);
1111
1112 tcg_gen_movi_i32(cpu_msr_c, 1);
1113 swx_skip = gen_new_label();
1114 tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_skip);
1115
1116 /*
1117 * Compare the value loaded at lwx with current contents of
1118 * the reserved location.
1119 */
1120 tval = tcg_temp_new_i32();
1121
1122 tcg_gen_atomic_cmpxchg_i32(tval, addr, cpu_res_val,
1123 cpu_R[dc->rd], mem_index,
1124 mop);
1125
1126 tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_skip);
1127 tcg_gen_movi_i32(cpu_msr_c, 0);
1128 tcg_temp_free_i32(tval);
1129 }
1130
1131 if (rev && size != 4) {
1132 /* Endian reverse the address. t is addr. */
1133 switch (size) {
1134 case 1:
1135 {
1136 tcg_gen_xori_tl(addr, addr, 3);
1137 break;
1138 }
1139
1140 case 2:
1141 /* 00 -> 10
1142 10 -> 00. */
1143 /* Force addr into the temp. */
1144 tcg_gen_xori_tl(addr, addr, 2);
1145 break;
1146 default:
1147 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1148 break;
1149 }
1150 }
1151
1152 if (!ex) {
1153 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1154 }
1155
1156 /* Verify alignment if needed. */
1157 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1158 TCGv_i32 t1 = tcg_const_i32(1);
1159 TCGv_i32 treg = tcg_const_i32(dc->rd);
1160 TCGv_i32 tsize = tcg_const_i32(size - 1);
1161
1162 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1163 /* FIXME: if the alignment is wrong, we should restore the value
1164 * in memory. One possible way to achieve this is to probe
1165 * the MMU prior to the memaccess; that way we could put
1166 * the alignment checks in between the probe and the mem
1167 * access.
1168 */
1169 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1170
1171 tcg_temp_free_i32(t1);
1172 tcg_temp_free_i32(treg);
1173 tcg_temp_free_i32(tsize);
1174 }
1175
1176 if (ex) {
1177 gen_set_label(swx_skip);
1178 }
1179
1180 tcg_temp_free(addr);
1181 }
1182
1183 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1184 TCGv_i32 d, TCGv_i32 a)
1185 {
1186 static const int mb_to_tcg_cc[] = {
1187 [CC_EQ] = TCG_COND_EQ,
1188 [CC_NE] = TCG_COND_NE,
1189 [CC_LT] = TCG_COND_LT,
1190 [CC_LE] = TCG_COND_LE,
1191 [CC_GE] = TCG_COND_GE,
1192 [CC_GT] = TCG_COND_GT,
1193 };
1194
1195 switch (cc) {
1196 case CC_EQ:
1197 case CC_NE:
1198 case CC_LT:
1199 case CC_LE:
1200 case CC_GE:
1201 case CC_GT:
1202 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1203 break;
1204 default:
1205 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1206 break;
1207 }
1208 }
1209
1210 static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
1211 {
1212 TCGv_i32 zero = tcg_const_i32(0);
1213
1214 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
1215 cpu_btaken, zero,
1216 pc_true, pc_false);
1217
1218 tcg_temp_free_i32(zero);
1219 }
1220
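/*
 * dec_setup_dslot() arms a branch delay slot by setting
 * delayed_branch = 2; the counter is decremented per insn in
 * mb_tr_translate_insn(), so the branch resolves right after the
 * delay-slot insn has been translated.
 */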
1221 static void dec_setup_dslot(DisasContext *dc)
1222 {
1223 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1224
1225 dc->delayed_branch = 2;
1226 dc->tb_flags |= D_FLAG;
1227
1228 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1229 tcg_temp_free_i32(tmp);
1230 }
1231
1232 static void dec_bcc(DisasContext *dc)
1233 {
1234 unsigned int cc;
1235 unsigned int dslot;
1236
1237 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1238 dslot = dc->ir & (1 << 25);
1239
1240 dc->delayed_branch = 1;
1241 if (dslot) {
1242 dec_setup_dslot(dc);
1243 }
1244
1245 if (dc->type_b) {
1246 dc->jmp = JMP_DIRECT_CC;
1247 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1248 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
1249 } else {
1250 dc->jmp = JMP_INDIRECT;
1251 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
1252 }
1253 eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
1254 }
1255
1256 static void dec_br(DisasContext *dc)
1257 {
1258 unsigned int dslot, link, abs, mbar;
1259
1260 dslot = dc->ir & (1 << 20);
1261 abs = dc->ir & (1 << 19);
1262 link = dc->ir & (1 << 18);
1263
1264 /* Memory barrier. */
1265 mbar = (dc->ir >> 16) & 31;
1266 if (mbar == 2 && dc->imm == 4) {
1267 uint16_t mbar_imm = dc->rd;
1268
1269 /* Data access memory barrier. */
1270 if ((mbar_imm & 2) == 0) {
1271 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1272 }
1273
1274 /* mbar IMM & 16 decodes to sleep. */
1275 if (mbar_imm & 16) {
1276 TCGv_i32 tmp_1;
1277
1278 if (trap_userspace(dc, true)) {
1279 /* Sleep is a privileged instruction. */
1280 return;
1281 }
1282
1283 t_sync_flags(dc);
1284
1285 tmp_1 = tcg_const_i32(1);
1286 tcg_gen_st_i32(tmp_1, cpu_env,
1287 -offsetof(MicroBlazeCPU, env)
1288 +offsetof(CPUState, halted));
1289 tcg_temp_free_i32(tmp_1);
1290
1291 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
1292
1293 gen_raise_exception(dc, EXCP_HLT);
1294 return;
1295 }
1296 /* Break the TB. */
1297 dc->cpustate_changed = 1;
1298 return;
1299 }
1300
1301 if (abs && link && !dslot) {
1302 if (dc->type_b) {
1303 /* BRKI */
1304 uint32_t imm = dec_alu_typeb_imm(dc);
1305 if (trap_userspace(dc, imm != 8 && imm != 0x18)) {
1306 return;
1307 }
1308 } else {
1309 /* BRK */
1310 if (trap_userspace(dc, true)) {
1311 return;
1312 }
1313 }
1314 }
1315
1316 dc->delayed_branch = 1;
1317 if (dslot) {
1318 dec_setup_dslot(dc);
1319 }
1320 if (link && dc->rd) {
1321 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
1322 }
1323
1324 if (abs) {
1325 if (dc->type_b) {
1326 uint32_t dest = dec_alu_typeb_imm(dc);
1327
1328 dc->jmp = JMP_DIRECT;
1329 dc->jmp_pc = dest;
1330 tcg_gen_movi_i32(cpu_btarget, dest);
1331 if (link && !dslot) {
1332 switch (dest) {
1333 case 8:
1334 case 0x18:
1335 gen_raise_exception_sync(dc, EXCP_BREAK);
1336 break;
1337 case 0:
1338 gen_raise_exception_sync(dc, EXCP_DEBUG);
1339 break;
1340 }
1341 }
1342 } else {
1343 dc->jmp = JMP_INDIRECT;
1344 tcg_gen_mov_i32(cpu_btarget, cpu_R[dc->rb]);
1345 if (link && !dslot) {
1346 gen_raise_exception_sync(dc, EXCP_BREAK);
1347 }
1348 }
1349 } else if (dc->type_b) {
1350 dc->jmp = JMP_DIRECT;
1351 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1352 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
1353 } else {
1354 dc->jmp = JMP_INDIRECT;
1355 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
1356 }
1357 tcg_gen_movi_i32(cpu_btaken, 1);
1358 }
1359
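/*
 * The rtid/rtbd/rted return paths restore the pre-exception mode: the
 * saved UMS/VMS bits sit one position above UM/VM in the MSR, so
 * shifting right by one and masking moves them back into place, while
 * IE, BIP, EE and EIP are adjusted according to the kind of return.
 */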
1360 static inline void do_rti(DisasContext *dc)
1361 {
1362 TCGv_i32 t0, t1;
1363 t0 = tcg_temp_new_i32();
1364 t1 = tcg_temp_new_i32();
1365 tcg_gen_mov_i32(t1, cpu_msr);
1366 tcg_gen_shri_i32(t0, t1, 1);
1367 tcg_gen_ori_i32(t1, t1, MSR_IE);
1368 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1369
1370 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1371 tcg_gen_or_i32(t1, t1, t0);
1372 msr_write(dc, t1);
1373 tcg_temp_free_i32(t1);
1374 tcg_temp_free_i32(t0);
1375 dc->tb_flags &= ~DRTI_FLAG;
1376 }
1377
1378 static inline void do_rtb(DisasContext *dc)
1379 {
1380 TCGv_i32 t0, t1;
1381 t0 = tcg_temp_new_i32();
1382 t1 = tcg_temp_new_i32();
1383 tcg_gen_mov_i32(t1, cpu_msr);
1384 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1385 tcg_gen_shri_i32(t0, t1, 1);
1386 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1387
1388 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1389 tcg_gen_or_i32(t1, t1, t0);
1390 msr_write(dc, t1);
1391 tcg_temp_free_i32(t1);
1392 tcg_temp_free_i32(t0);
1393 dc->tb_flags &= ~DRTB_FLAG;
1394 }
1395
1396 static inline void do_rte(DisasContext *dc)
1397 {
1398 TCGv_i32 t0, t1;
1399 t0 = tcg_temp_new_i32();
1400 t1 = tcg_temp_new_i32();
1401
1402 tcg_gen_mov_i32(t1, cpu_msr);
1403 tcg_gen_ori_i32(t1, t1, MSR_EE);
1404 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1405 tcg_gen_shri_i32(t0, t1, 1);
1406 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1407
1408 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1409 tcg_gen_or_i32(t1, t1, t0);
1410 msr_write(dc, t1);
1411 tcg_temp_free_i32(t1);
1412 tcg_temp_free_i32(t0);
1413 dc->tb_flags &= ~DRTE_FLAG;
1414 }
1415
1416 static void dec_rts(DisasContext *dc)
1417 {
1418 unsigned int b_bit, i_bit, e_bit;
1419
1420 i_bit = dc->ir & (1 << 21);
1421 b_bit = dc->ir & (1 << 22);
1422 e_bit = dc->ir & (1 << 23);
1423
1424 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1425 return;
1426 }
1427
1428 dec_setup_dslot(dc);
1429
1430 if (i_bit) {
1431 dc->tb_flags |= DRTI_FLAG;
1432 } else if (b_bit) {
1433 dc->tb_flags |= DRTB_FLAG;
1434 } else if (e_bit) {
1435 dc->tb_flags |= DRTE_FLAG;
1436 }
1437
1438 dc->jmp = JMP_INDIRECT;
1439 tcg_gen_movi_i32(cpu_btaken, 1);
1440 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
1441 }
1442
1443 static int dec_check_fpuv2(DisasContext *dc)
1444 {
1445 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1446 gen_raise_hw_excp(dc, ESR_EC_FPU);
1447 }
1448 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1449 }
1450
1451 static void dec_fpu(DisasContext *dc)
1452 {
1453 unsigned int fpu_insn;
1454
1455 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1456 return;
1457 }
1458
1459 fpu_insn = (dc->ir >> 7) & 7;
1460
1461 switch (fpu_insn) {
1462 case 0:
1463 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1464 cpu_R[dc->rb]);
1465 break;
1466
1467 case 1:
1468 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1469 cpu_R[dc->rb]);
1470 break;
1471
1472 case 2:
1473 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1474 cpu_R[dc->rb]);
1475 break;
1476
1477 case 3:
1478 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1479 cpu_R[dc->rb]);
1480 break;
1481
1482 case 4:
1483 switch ((dc->ir >> 4) & 7) {
1484 case 0:
1485 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1486 cpu_R[dc->ra], cpu_R[dc->rb]);
1487 break;
1488 case 1:
1489 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1490 cpu_R[dc->ra], cpu_R[dc->rb]);
1491 break;
1492 case 2:
1493 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1494 cpu_R[dc->ra], cpu_R[dc->rb]);
1495 break;
1496 case 3:
1497 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1498 cpu_R[dc->ra], cpu_R[dc->rb]);
1499 break;
1500 case 4:
1501 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1502 cpu_R[dc->ra], cpu_R[dc->rb]);
1503 break;
1504 case 5:
1505 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1506 cpu_R[dc->ra], cpu_R[dc->rb]);
1507 break;
1508 case 6:
1509 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1510 cpu_R[dc->ra], cpu_R[dc->rb]);
1511 break;
1512 default:
1513 qemu_log_mask(LOG_UNIMP,
1514 "unimplemented fcmp fpu_insn=%x pc=%x"
1515 " opc=%x\n",
1516 fpu_insn, (uint32_t)dc->base.pc_next,
1517 dc->opcode);
1518 dc->abort_at_next_insn = 1;
1519 break;
1520 }
1521 break;
1522
1523 case 5:
1524 if (!dec_check_fpuv2(dc)) {
1525 return;
1526 }
1527 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1528 break;
1529
1530 case 6:
1531 if (!dec_check_fpuv2(dc)) {
1532 return;
1533 }
1534 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1535 break;
1536
1537 case 7:
1538 if (!dec_check_fpuv2(dc)) {
1539 return;
1540 }
1541 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1542 break;
1543
1544 default:
1545 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1546 " opc=%x\n",
1547 fpu_insn, (uint32_t)dc->base.pc_next, dc->opcode);
1548 dc->abort_at_next_insn = 1;
1549 break;
1550 }
1551 }
1552
1553 static void dec_null(DisasContext *dc)
1554 {
1555 if (trap_illegal(dc, true)) {
1556 return;
1557 }
1558 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
1559 (uint32_t)dc->base.pc_next, dc->opcode);
1560 dc->abort_at_next_insn = 1;
1561 }
1562
1563 /* Insns connected to FSL or AXI stream attached devices. */
1564 static void dec_stream(DisasContext *dc)
1565 {
1566 TCGv_i32 t_id, t_ctrl;
1567 int ctrl;
1568
1569 if (trap_userspace(dc, true)) {
1570 return;
1571 }
1572
1573 t_id = tcg_temp_new_i32();
1574 if (dc->type_b) {
1575 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1576 ctrl = dc->imm >> 10;
1577 } else {
1578 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1579 ctrl = dc->imm >> 5;
1580 }
1581
1582 t_ctrl = tcg_const_i32(ctrl);
1583
1584 if (dc->rd == 0) {
1585 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1586 } else {
1587 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1588 }
1589 tcg_temp_free_i32(t_id);
1590 tcg_temp_free_i32(t_ctrl);
1591 }
1592
1593 static struct decoder_info {
1594 struct {
1595 uint32_t bits;
1596 uint32_t mask;
1597 };
1598 void (*dec)(DisasContext *dc);
1599 } decinfo[] = {
1600 {DEC_SUB, dec_sub},
1601 {DEC_AND, dec_and},
1602 {DEC_XOR, dec_xor},
1603 {DEC_OR, dec_or},
1604 {DEC_BIT, dec_bit},
1605 {DEC_BARREL, dec_barrel},
1606 {DEC_LD, dec_load},
1607 {DEC_ST, dec_store},
1608 {DEC_IMM, dec_imm},
1609 {DEC_BR, dec_br},
1610 {DEC_BCC, dec_bcc},
1611 {DEC_RTS, dec_rts},
1612 {DEC_FPU, dec_fpu},
1613 {DEC_MUL, dec_mul},
1614 {DEC_DIV, dec_div},
1615 {DEC_MSR, dec_msr},
1616 {DEC_STREAM, dec_stream},
1617 {{0, 0}, dec_null}
1618 };
1619
1620 static void old_decode(DisasContext *dc, uint32_t ir)
1621 {
1622 int i;
1623
1624 dc->ir = ir;
1625
1626 /* Bit 29 (bit 2 counting from the MSB) selects type-B (immediate) insns. */
1627 dc->type_b = ir & (1 << 29);
1628
1629 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1630 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1631 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1632 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1633 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1634
1635 /* Large switch for all insns. */
1636 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1637 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1638 decinfo[i].dec(dc);
1639 break;
1640 }
1641 }
1642 }
1643
1644 static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
1645 {
1646 DisasContext *dc = container_of(dcb, DisasContext, base);
1647 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1648 int bound;
1649
1650 dc->cpu = cpu;
1651 dc->synced_flags = dc->tb_flags = dc->base.tb->flags;
1652 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1653 dc->jmp = dc->delayed_branch ? JMP_INDIRECT : JMP_NOJMP;
1654 dc->cpustate_changed = 0;
1655 dc->abort_at_next_insn = 0;
1656 dc->ext_imm = dc->base.tb->cs_base;
1657 dc->r0 = NULL;
1658 dc->r0_set = false;
1659
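/* Limit max_insns so the TB never runs past the end of the guest page. */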
1660 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
1661 dc->base.max_insns = MIN(dc->base.max_insns, bound);
1662 }
1663
1664 static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
1665 {
1666 }
1667
1668 static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
1669 {
1670 tcg_gen_insn_start(dcb->pc_next);
1671 }
1672
1673 static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
1674 const CPUBreakpoint *bp)
1675 {
1676 DisasContext *dc = container_of(dcb, DisasContext, base);
1677
1678 gen_raise_exception_sync(dc, EXCP_DEBUG);
1679
1680 /*
1681 * The address covered by the breakpoint must be included in
1682 * [tb->pc, tb->pc + tb->size) in order for it to be
1683 * properly cleared -- thus we increment the PC here so that
1684 * the logic setting tb->size below does the right thing.
1685 */
1686 dc->base.pc_next += 4;
1687 return true;
1688 }
1689
1690 static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
1691 {
1692 DisasContext *dc = container_of(dcb, DisasContext, base);
1693 CPUMBState *env = cs->env_ptr;
1694 uint32_t ir;
1695
1696 /* TODO: This should raise an exception, not terminate qemu. */
1697 if (dc->base.pc_next & 3) {
1698 cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
1699 (uint32_t)dc->base.pc_next);
1700 }
1701
1702 dc->clear_imm = 1;
1703 ir = cpu_ldl_code(env, dc->base.pc_next);
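/* Insns already converted to decodetree are handled by decode();
   anything it rejects falls through to the legacy table decoder. */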
1704 if (!decode(dc, ir)) {
1705 old_decode(dc, ir);
1706 }
1707
1708 if (dc->r0) {
1709 tcg_temp_free_i32(dc->r0);
1710 dc->r0 = NULL;
1711 dc->r0_set = false;
1712 }
1713
1714 if (dc->clear_imm && (dc->tb_flags & IMM_FLAG)) {
1715 dc->tb_flags &= ~IMM_FLAG;
1716 tcg_gen_discard_i32(cpu_imm);
1717 }
1718 dc->base.pc_next += 4;
1719
1720 if (dc->delayed_branch && --dc->delayed_branch == 0) {
1721 if (dc->tb_flags & DRTI_FLAG) {
1722 do_rti(dc);
1723 }
1724 if (dc->tb_flags & DRTB_FLAG) {
1725 do_rtb(dc);
1726 }
1727 if (dc->tb_flags & DRTE_FLAG) {
1728 do_rte(dc);
1729 }
1730 /* Clear the delay slot flag. */
1731 dc->tb_flags &= ~D_FLAG;
1732 dc->base.is_jmp = DISAS_JUMP;
1733 }
1734
1735 /* Force an exit if the per-tb cpu state has changed. */
1736 if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
1737 dc->base.is_jmp = DISAS_UPDATE;
1738 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1739 }
1740 }
1741
1742 static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
1743 {
1744 DisasContext *dc = container_of(dcb, DisasContext, base);
1745
1746 assert(!dc->abort_at_next_insn);
1747
1748 if (dc->base.is_jmp == DISAS_NORETURN) {
1749 /* We have already exited the TB. */
1750 return;
1751 }
1752
1753 t_sync_flags(dc);
1754 if (dc->tb_flags & D_FLAG) {
1755 sync_jmpstate(dc);
1756 dc->jmp = JMP_NOJMP;
1757 }
1758
1759 switch (dc->base.is_jmp) {
1760 case DISAS_TOO_MANY:
1761 assert(dc->jmp == JMP_NOJMP);
1762 gen_goto_tb(dc, 0, dc->base.pc_next);
1763 return;
1764
1765 case DISAS_UPDATE:
1766 assert(dc->jmp == JMP_NOJMP);
1767 if (unlikely(cs->singlestep_enabled)) {
1768 gen_raise_exception(dc, EXCP_DEBUG);
1769 } else {
1770 tcg_gen_exit_tb(NULL, 0);
1771 }
1772 return;
1773
1774 case DISAS_JUMP:
1775 switch (dc->jmp) {
1776 case JMP_INDIRECT:
1777 {
1778 TCGv_i32 tmp_pc = tcg_const_i32(dc->base.pc_next);
1779 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1780 tcg_temp_free_i32(tmp_pc);
1781
1782 if (unlikely(cs->singlestep_enabled)) {
1783 gen_raise_exception(dc, EXCP_DEBUG);
1784 } else {
1785 tcg_gen_exit_tb(NULL, 0);
1786 }
1787 }
1788 return;
1789
1790 case JMP_DIRECT_CC:
1791 {
1792 TCGLabel *l1 = gen_new_label();
1793 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
1794 gen_goto_tb(dc, 1, dc->base.pc_next);
1795 gen_set_label(l1);
1796 }
1797 /* fall through */
1798
1799 case JMP_DIRECT:
1800 gen_goto_tb(dc, 0, dc->jmp_pc);
1801 return;
1802 }
1803 /* fall through */
1804
1805 default:
1806 g_assert_not_reached();
1807 }
1808 }
1809
1810 static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
1811 {
1812 qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
1813 log_target_disas(cs, dcb->pc_first, dcb->tb->size);
1814 }
1815
1816 static const TranslatorOps mb_tr_ops = {
1817 .init_disas_context = mb_tr_init_disas_context,
1818 .tb_start = mb_tr_tb_start,
1819 .insn_start = mb_tr_insn_start,
1820 .breakpoint_check = mb_tr_breakpoint_check,
1821 .translate_insn = mb_tr_translate_insn,
1822 .tb_stop = mb_tr_tb_stop,
1823 .disas_log = mb_tr_disas_log,
1824 };
1825
1826 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1827 {
1828 DisasContext dc;
1829 translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
1830 }
1831
1832 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1833 {
1834 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1835 CPUMBState *env = &cpu->env;
1836 int i;
1837
1838 if (!env) {
1839 return;
1840 }
1841
1842 qemu_fprintf(f, "IN: PC=%x %s\n",
1843 env->pc, lookup_symbol(env->pc));
1844 qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
1845 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
1846 env->msr, env->esr, env->ear,
1847 env->imm, env->iflags, env->fsr, env->btr);
1848 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1849 env->btaken, env->btarget,
1850 (env->msr & MSR_UM) ? "user" : "kernel",
1851 (env->msr & MSR_UMS) ? "user" : "kernel",
1852 (bool)(env->msr & MSR_EIP),
1853 (bool)(env->msr & MSR_IE));
1854 for (i = 0; i < 12; i++) {
1855 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1856 if ((i + 1) % 4 == 0) {
1857 qemu_fprintf(f, "\n");
1858 }
1859 }
1860
1861 /* Registers that aren't modeled are reported as 0 */
1862 qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1863 "rtlblo=0 rtlbhi=0\n", env->edr);
1864 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1865 for (i = 0; i < 32; i++) {
1866 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1867 if ((i + 1) % 4 == 0)
1868 qemu_fprintf(f, "\n");
1869 }
1870 qemu_fprintf(f, "\n\n");
1871 }
1872
1873 void mb_tcg_init(void)
1874 {
1875 #define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
1876 #define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
1877
1878 static const struct {
1879 TCGv_i32 *var; int ofs; char name[8];
1880 } i32s[] = {
1881 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
1882 R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
1883 R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1884 R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1885
1886 SP(pc),
1887 SP(msr),
1888 SP(msr_c),
1889 SP(imm),
1890 SP(iflags),
1891 SP(btaken),
1892 SP(btarget),
1893 SP(res_val),
1894 };
1895
1896 #undef R
1897 #undef SP
1898
1899 for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
1900 *i32s[i].var =
1901 tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
1902 }
1903
1904 cpu_res_addr =
1905 tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
1906 }
1907
1908 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1909 target_ulong *data)
1910 {
1911 env->pc = data[0];
1912 }