target/microblaze: Fix width of EDR
target/microblaze/translate.c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36
37 #define SIM_COMPAT 0
38 #define DISAS_GNU 1
39 #define DISAS_MB 1
40 #if DISAS_MB && !SIM_COMPAT
41 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 # define LOG_DIS(...) do { } while (0)
44 #endif
45
46 #define D(x)
47
48 #define EXTRACT_FIELD(src, start, end) \
49 (((src) >> start) & ((1 << (end - start + 1)) - 1))
50
51 /* is_jmp field values */
52 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
56 static TCGv_i32 env_debug;
57 static TCGv_i32 cpu_R[32];
58 static TCGv_i32 cpu_pc;
59 static TCGv_i32 cpu_msr;
60 static TCGv_i64 cpu_ear;
61 static TCGv_i32 cpu_esr;
62 static TCGv_i32 env_imm;
63 static TCGv_i32 env_btaken;
64 static TCGv_i32 cpu_btarget;
65 static TCGv_i32 env_iflags;
66 static TCGv env_res_addr;
67 static TCGv_i32 env_res_val;
68
69 #include "exec/gen-icount.h"
70
71 /* This is the state at translation time. */
72 typedef struct DisasContext {
73 MicroBlazeCPU *cpu;
74 uint32_t pc;
75
76 /* Decoder. */
77 int type_b;
78 uint32_t ir;
79 uint8_t opcode;
80 uint8_t rd, ra, rb;
81 uint16_t imm;
82
83 unsigned int cpustate_changed;
84 unsigned int delayed_branch;
85 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
86 unsigned int clear_imm;
87 int is_jmp;
88
89 #define JMP_NOJMP 0
90 #define JMP_DIRECT 1
91 #define JMP_DIRECT_CC 2
92 #define JMP_INDIRECT 3
93 unsigned int jmp;
94 uint32_t jmp_pc;
95
96 int abort_at_next_insn;
97 struct TranslationBlock *tb;
98 int singlestep_enabled;
99 } DisasContext;
100
101 static const char *regnames[] =
102 {
103 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
105 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
106 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
107 };
108
109 static inline void t_sync_flags(DisasContext *dc)
110 {
111 /* Synch the tb dependent flags between translator and runtime. */
112 if (dc->tb_flags != dc->synced_flags) {
113 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
114 dc->synced_flags = dc->tb_flags;
115 }
116 }
117
118 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
119 {
120 TCGv_i32 tmp = tcg_const_i32(index);
121
122 t_sync_flags(dc);
123 tcg_gen_movi_i32(cpu_pc, dc->pc);
124 gen_helper_raise_exception(cpu_env, tmp);
125 tcg_temp_free_i32(tmp);
126 dc->is_jmp = DISAS_UPDATE;
127 }
128
129 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
130 {
131 #ifndef CONFIG_USER_ONLY
132 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
133 #else
134 return true;
135 #endif
136 }
137
138 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
139 {
140 if (use_goto_tb(dc, dest)) {
141 tcg_gen_goto_tb(n);
142 tcg_gen_movi_i32(cpu_pc, dest);
143 tcg_gen_exit_tb(dc->tb, n);
144 } else {
145 tcg_gen_movi_i32(cpu_pc, dest);
146 tcg_gen_exit_tb(NULL, 0);
147 }
148 }
149
150 static void read_carry(DisasContext *dc, TCGv_i32 d)
151 {
152 tcg_gen_shri_i32(d, cpu_msr, 31);
153 }
154
155 /*
156 * write_carry sets the carry bits in MSR based on bit 0 of v.
157 * v[31:1] are ignored.
158 */
159 static void write_carry(DisasContext *dc, TCGv_i32 v)
160 {
161 /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
162 tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 2, 1);
163 tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 31, 1);
164 }
165
166 static void write_carryi(DisasContext *dc, bool carry)
167 {
168 TCGv_i32 t0 = tcg_temp_new_i32();
169 tcg_gen_movi_i32(t0, carry);
170 write_carry(dc, t0);
171 tcg_temp_free_i32(t0);
172 }
173
174 /*
175 * Returns true if the insn is an illegal operation.
176 * If exceptions are enabled, an exception is raised.
177 */
178 static bool trap_illegal(DisasContext *dc, bool cond)
179 {
180 if (cond && (dc->tb_flags & MSR_EE_FLAG)
181 && dc->cpu->cfg.illegal_opcode_exception) {
182 tcg_gen_movi_i32(cpu_esr, ESR_EC_ILLEGAL_OP);
183 t_gen_raise_exception(dc, EXCP_HW_EXCP);
184 }
185 return cond;
186 }
187
188 /*
189 * Returns true if the insn is illegal in userspace.
190 * If exceptions are enabled, an exception is raised.
191 */
192 static bool trap_userspace(DisasContext *dc, bool cond)
193 {
194 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
195 bool cond_user = cond && mem_index == MMU_USER_IDX;
196
197 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
198 tcg_gen_movi_i32(cpu_esr, ESR_EC_PRIVINSN);
199 t_gen_raise_exception(dc, EXCP_HW_EXCP);
200 }
201 return cond_user;
202 }
203
204 /* True if ALU operand b is a small immediate that may deserve
205 faster treatment. */
206 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
207 {
208 /* Immediate insn without the imm prefix ? */
209 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
210 }
211
212 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
213 {
214 if (dc->type_b) {
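        /* Under a live imm prefix, dec_imm already loaded the upper 16 bits
           into env_imm; OR in the low half. Otherwise sign-extend the
           16-bit immediate. */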
215 if (dc->tb_flags & IMM_FLAG)
216 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
217 else
218 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
219 return &env_imm;
220 } else
221 return &cpu_R[dc->rb];
222 }
223
224 static void dec_add(DisasContext *dc)
225 {
226 unsigned int k, c;
227 TCGv_i32 cf;
228
229 k = dc->opcode & 4;
230 c = dc->opcode & 2;
231
232 LOG_DIS("add%s%s%s r%d r%d r%d\n",
233 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
234 dc->rd, dc->ra, dc->rb);
235
236 /* Take care of the easy cases first. */
237 if (k) {
238 /* k - keep carry, no need to update MSR. */
239 /* If rd == r0, it's a nop. */
240 if (dc->rd) {
241 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
242
243 if (c) {
244 /* c - Add carry into the result. */
245 cf = tcg_temp_new_i32();
246
247 read_carry(dc, cf);
248 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
249 tcg_temp_free_i32(cf);
250 }
251 }
252 return;
253 }
254
255 /* From now on, we can assume k is zero. So we need to update MSR. */
256 /* Extract carry. */
257 cf = tcg_temp_new_i32();
258 if (c) {
259 read_carry(dc, cf);
260 } else {
261 tcg_gen_movi_i32(cf, 0);
262 }
263
264 if (dc->rd) {
265 TCGv_i32 ncf = tcg_temp_new_i32();
266 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
267 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
268 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
269 write_carry(dc, ncf);
270 tcg_temp_free_i32(ncf);
271 } else {
272 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
273 write_carry(dc, cf);
274 }
275 tcg_temp_free_i32(cf);
276 }
277
278 static void dec_sub(DisasContext *dc)
279 {
280 unsigned int u, cmp, k, c;
281 TCGv_i32 cf, na;
282
283 u = dc->imm & 2;
284 k = dc->opcode & 4;
285 c = dc->opcode & 2;
286 cmp = (dc->imm & 1) && (!dc->type_b) && k;
287
288 if (cmp) {
289 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
290 if (dc->rd) {
291 if (u)
292 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
293 else
294 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
295 }
296 return;
297 }
298
299 LOG_DIS("sub%s%s r%d, r%d r%d\n",
300 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
301
302 /* Take care of the easy cases first. */
303 if (k) {
304 /* k - keep carry, no need to update MSR. */
305 /* If rd == r0, it's a nop. */
306 if (dc->rd) {
307 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
308
309 if (c) {
310 /* c - Add carry into the result. */
311 cf = tcg_temp_new_i32();
312
313 read_carry(dc, cf);
314 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
315 tcg_temp_free_i32(cf);
316 }
317 }
318 return;
319 }
320
321 /* From now on, we can assume k is zero. So we need to update MSR. */
322 /* Extract carry. And complement a into na. */
323 cf = tcg_temp_new_i32();
324 na = tcg_temp_new_i32();
325 if (c) {
326 read_carry(dc, cf);
327 } else {
328 tcg_gen_movi_i32(cf, 1);
329 }
330
331 /* d = b + ~a + c. carry defaults to 1. */
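    /* With the default carry-in of 1 this computes b + ~a + 1 == b - a;
       for rsubc the MSR carry is used in place of the constant 1. */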
332 tcg_gen_not_i32(na, cpu_R[dc->ra]);
333
334 if (dc->rd) {
335 TCGv_i32 ncf = tcg_temp_new_i32();
336 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
337 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
338 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
339 write_carry(dc, ncf);
340 tcg_temp_free_i32(ncf);
341 } else {
342 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
343 write_carry(dc, cf);
344 }
345 tcg_temp_free_i32(cf);
346 tcg_temp_free_i32(na);
347 }
348
349 static void dec_pattern(DisasContext *dc)
350 {
351 unsigned int mode;
352
353 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
354 return;
355 }
356
357 mode = dc->opcode & 3;
358 switch (mode) {
359 case 0:
360 /* pcmpbf. */
361 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
362 if (dc->rd)
363 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
364 break;
365 case 2:
366 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
367 if (dc->rd) {
368 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
369 cpu_R[dc->ra], cpu_R[dc->rb]);
370 }
371 break;
372 case 3:
373 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
374 if (dc->rd) {
375 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
376 cpu_R[dc->ra], cpu_R[dc->rb]);
377 }
378 break;
379 default:
380 cpu_abort(CPU(dc->cpu),
381 "unsupported pattern insn opcode=%x\n", dc->opcode);
382 break;
383 }
384 }
385
386 static void dec_and(DisasContext *dc)
387 {
388 unsigned int not;
389
390 if (!dc->type_b && (dc->imm & (1 << 10))) {
391 dec_pattern(dc);
392 return;
393 }
394
395 not = dc->opcode & (1 << 1);
396 LOG_DIS("and%s\n", not ? "n" : "");
397
398 if (!dc->rd)
399 return;
400
401 if (not) {
402 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
403 } else
404 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
405 }
406
407 static void dec_or(DisasContext *dc)
408 {
409 if (!dc->type_b && (dc->imm & (1 << 10))) {
410 dec_pattern(dc);
411 return;
412 }
413
414 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
415 if (dc->rd)
416 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
417 }
418
419 static void dec_xor(DisasContext *dc)
420 {
421 if (!dc->type_b && (dc->imm & (1 << 10))) {
422 dec_pattern(dc);
423 return;
424 }
425
426 LOG_DIS("xor r%d\n", dc->rd);
427 if (dc->rd)
428 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
429 }
430
431 static inline void msr_read(DisasContext *dc, TCGv_i32 d)
432 {
433 tcg_gen_mov_i32(d, cpu_msr);
434 }
435
436 static inline void msr_write(DisasContext *dc, TCGv_i32 v)
437 {
438 dc->cpustate_changed = 1;
439 /* PVR bit is not writable, and is never set. */
440 tcg_gen_andi_i32(cpu_msr, v, ~MSR_PVR);
441 }
442
443 static void dec_msr(DisasContext *dc)
444 {
445 CPUState *cs = CPU(dc->cpu);
446 TCGv_i32 t0, t1;
447 unsigned int sr, rn;
448 bool to, clrset, extended = false;
449
450 sr = extract32(dc->imm, 0, 14);
451 to = extract32(dc->imm, 14, 1);
452 clrset = extract32(dc->imm, 15, 1) == 0;
453 dc->type_b = 1;
454 if (to) {
455 dc->cpustate_changed = 1;
456 }
457
458 /* Extended MSRs are only available if addr_size > 32. */
459 if (dc->cpu->cfg.addr_size > 32) {
460 /* The E-bit is encoded differently for To/From MSR. */
461 static const unsigned int e_bit[] = { 19, 24 };
462
463 extended = extract32(dc->imm, e_bit[to], 1);
464 }
465
466 /* msrclr and msrset. */
467 if (clrset) {
468 bool clr = extract32(dc->ir, 16, 1);
469
470 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
471 dc->rd, dc->imm);
472
473 if (!dc->cpu->cfg.use_msr_instr) {
474 /* nop??? */
475 return;
476 }
477
478 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
479 return;
480 }
481
482 if (dc->rd)
483 msr_read(dc, cpu_R[dc->rd]);
484
485 t0 = tcg_temp_new_i32();
486 t1 = tcg_temp_new_i32();
487 msr_read(dc, t0);
488 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
489
490 if (clr) {
491 tcg_gen_not_i32(t1, t1);
492 tcg_gen_and_i32(t0, t0, t1);
493 } else
494 tcg_gen_or_i32(t0, t0, t1);
495 msr_write(dc, t0);
496 tcg_temp_free_i32(t0);
497 tcg_temp_free_i32(t1);
498 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
499 dc->is_jmp = DISAS_UPDATE;
500 return;
501 }
502
503 if (trap_userspace(dc, to)) {
504 return;
505 }
506
507 #if !defined(CONFIG_USER_ONLY)
508 /* Catch read/writes to the mmu block. */
509 if ((sr & ~0xff) == 0x1000) {
510 TCGv_i32 tmp_ext = tcg_const_i32(extended);
511 TCGv_i32 tmp_sr;
512
513 sr &= 7;
514 tmp_sr = tcg_const_i32(sr);
515 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
516 if (to) {
517 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
518 } else {
519 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
520 }
521 tcg_temp_free_i32(tmp_sr);
522 tcg_temp_free_i32(tmp_ext);
523 return;
524 }
525 #endif
526
527 if (to) {
528 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
529 switch (sr) {
530 case SR_PC:
531 break;
532 case SR_MSR:
533 msr_write(dc, cpu_R[dc->ra]);
534 break;
535 case SR_EAR:
536 tcg_gen_extu_i32_i64(cpu_ear, cpu_R[dc->ra]);
537 break;
538 case SR_ESR:
539 tcg_gen_mov_i32(cpu_esr, cpu_R[dc->ra]);
540 break;
541 case SR_FSR:
542 tcg_gen_st_i32(cpu_R[dc->ra],
543 cpu_env, offsetof(CPUMBState, fsr));
544 break;
545 case SR_BTR:
546 tcg_gen_st_i32(cpu_R[dc->ra],
547 cpu_env, offsetof(CPUMBState, btr));
548 break;
549 case SR_EDR:
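            /* EDR (Exception Data Register) is 32 bits wide, hence the
               plain i32 store into env->edr. */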
550 tcg_gen_st_i32(cpu_R[dc->ra],
551 cpu_env, offsetof(CPUMBState, edr));
552 break;
553 case 0x800:
554 tcg_gen_st_i32(cpu_R[dc->ra],
555 cpu_env, offsetof(CPUMBState, slr));
556 break;
557 case 0x802:
558 tcg_gen_st_i32(cpu_R[dc->ra],
559 cpu_env, offsetof(CPUMBState, shr));
560 break;
561 default:
562 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
563 break;
564 }
565 } else {
566 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
567
568 switch (sr) {
569 case SR_PC:
570 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
571 break;
572 case SR_MSR:
573 msr_read(dc, cpu_R[dc->rd]);
574 break;
575 case SR_EAR:
576 if (extended) {
577 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_ear);
578 } else {
579 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_ear);
580 }
581 break;
582 case SR_ESR:
583 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_esr);
584 break;
585 case SR_FSR:
586 tcg_gen_ld_i32(cpu_R[dc->rd],
587 cpu_env, offsetof(CPUMBState, fsr));
588 break;
589 case SR_BTR:
590 tcg_gen_ld_i32(cpu_R[dc->rd],
591 cpu_env, offsetof(CPUMBState, btr));
592 break;
593 case SR_EDR:
594 tcg_gen_ld_i32(cpu_R[dc->rd],
595 cpu_env, offsetof(CPUMBState, edr));
596 break;
597 case 0x800:
598 tcg_gen_ld_i32(cpu_R[dc->rd],
599 cpu_env, offsetof(CPUMBState, slr));
600 break;
601 case 0x802:
602 tcg_gen_ld_i32(cpu_R[dc->rd],
603 cpu_env, offsetof(CPUMBState, shr));
604 break;
605 case 0x2000 ... 0x200c:
606 rn = sr & 0xf;
607 tcg_gen_ld_i32(cpu_R[dc->rd],
608 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
609 break;
610 default:
611 cpu_abort(cs, "unknown mfs reg %x\n", sr);
612 break;
613 }
614 }
615
616 if (dc->rd == 0) {
617 tcg_gen_movi_i32(cpu_R[0], 0);
618 }
619 }
620
621 /* Multiplier unit. */
622 static void dec_mul(DisasContext *dc)
623 {
624 TCGv_i32 tmp;
625 unsigned int subcode;
626
627 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
628 return;
629 }
630
631 subcode = dc->imm & 3;
632
633 if (dc->type_b) {
634 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
635 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
636 return;
637 }
638
639 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
640 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
641 /* nop??? */
642 }
643
644 tmp = tcg_temp_new_i32();
645 switch (subcode) {
646 case 0:
647 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
648 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
649 break;
650 case 1:
651 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
652 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
653 cpu_R[dc->ra], cpu_R[dc->rb]);
654 break;
655 case 2:
656 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
657 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
658 cpu_R[dc->ra], cpu_R[dc->rb]);
659 break;
660 case 3:
661 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
662 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
663 break;
664 default:
665 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
666 break;
667 }
668 tcg_temp_free_i32(tmp);
669 }
670
671 /* Div unit. */
672 static void dec_div(DisasContext *dc)
673 {
674 unsigned int u;
675
676 u = dc->imm & 2;
677 LOG_DIS("div\n");
678
679 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
680 return;
681 }
682
683 if (u)
684 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
685 cpu_R[dc->ra]);
686 else
687 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
688 cpu_R[dc->ra]);
689 if (!dc->rd)
690 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
691 }
692
693 static void dec_barrel(DisasContext *dc)
694 {
695 TCGv_i32 t0;
696 unsigned int imm_w, imm_s;
697 bool s, t, e = false, i = false;
698
699 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
700 return;
701 }
702
703 if (dc->type_b) {
704 /* Insert and extract are only available in immediate mode. */
705 i = extract32(dc->imm, 15, 1);
706 e = extract32(dc->imm, 14, 1);
707 }
708 s = extract32(dc->imm, 10, 1);
709 t = extract32(dc->imm, 9, 1);
710 imm_w = extract32(dc->imm, 6, 5);
711 imm_s = extract32(dc->imm, 0, 5);
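    /* s: shift left vs. right, t: arithmetic vs. logical shift,
       e/i: the immediate extract (bsefi) / insert (bsifi) forms. */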
712
713 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
714 e ? "e" : "",
715 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
716
717 if (e) {
718 if (imm_w + imm_s > 32 || imm_w == 0) {
719 /* These inputs have undefined behavior. */
720 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
721 imm_w, imm_s);
722 } else {
723 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
724 }
725 } else if (i) {
726 int width = imm_w - imm_s + 1;
727
728 if (imm_w < imm_s) {
729 /* These inputs have undefined behavior. */
730 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
731 imm_w, imm_s);
732 } else {
733 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
734 imm_s, width);
735 }
736 } else {
737 t0 = tcg_temp_new_i32();
738
739 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
740 tcg_gen_andi_i32(t0, t0, 31);
741
742 if (s) {
743 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
744 } else {
745 if (t) {
746 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
747 } else {
748 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
749 }
750 }
751 tcg_temp_free_i32(t0);
752 }
753 }
754
755 static void dec_bit(DisasContext *dc)
756 {
757 CPUState *cs = CPU(dc->cpu);
758 TCGv_i32 t0;
759 unsigned int op;
760
761 op = dc->ir & ((1 << 9) - 1);
762 switch (op) {
763 case 0x21:
764 /* src. */
765 t0 = tcg_temp_new_i32();
766
767 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
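        /* Shift right through carry: t0 keeps the old carry in bit 31
           (MSR_CC is the MSB), which becomes the new MSB of rd; bit 0 of
           ra becomes the new carry. */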
768 tcg_gen_andi_i32(t0, cpu_msr, MSR_CC);
769 write_carry(dc, cpu_R[dc->ra]);
770 if (dc->rd) {
771 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
772 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
773 }
774 tcg_temp_free_i32(t0);
775 break;
776
777 case 0x1:
778 case 0x41:
779 /* srl. */
780 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
781
782 /* Update carry. Note that write carry only looks at the LSB. */
783 write_carry(dc, cpu_R[dc->ra]);
784 if (dc->rd) {
785 if (op == 0x41)
786 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
787 else
788 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
789 }
790 break;
791 case 0x60:
792 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
793 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
794 break;
795 case 0x61:
796 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
797 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
798 break;
799 case 0x64:
800 case 0x66:
801 case 0x74:
802 case 0x76:
803 /* wdc. */
804 LOG_DIS("wdc r%d\n", dc->ra);
805 trap_userspace(dc, true);
806 break;
807 case 0x68:
808 /* wic. */
809 LOG_DIS("wic r%d\n", dc->ra);
810 trap_userspace(dc, true);
811 break;
812 case 0xe0:
813 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
814 return;
815 }
816 if (dc->cpu->cfg.use_pcmp_instr) {
817 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
818 }
819 break;
820 case 0x1e0:
821 /* swapb */
822 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
823 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
824 break;
825 case 0x1e2:
826 /*swaph */
827 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
828 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
829 break;
830 default:
831 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
832 dc->pc, op, dc->rd, dc->ra, dc->rb);
833 break;
834 }
835 }
836
837 static inline void sync_jmpstate(DisasContext *dc)
838 {
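    /* Flush a pending direct branch into env_btaken/cpu_btarget so that
       an exception taken in the delay slot sees consistent branch state. */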
839 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
840 if (dc->jmp == JMP_DIRECT) {
841 tcg_gen_movi_i32(env_btaken, 1);
842 }
843 dc->jmp = JMP_INDIRECT;
844 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
845 }
846 }
847
848 static void dec_imm(DisasContext *dc)
849 {
850 LOG_DIS("imm %x\n", dc->imm << 16);
851 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
852 dc->tb_flags |= IMM_FLAG;
853 dc->clear_imm = 0;
854 }
855
856 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
857 {
858 bool extimm = dc->tb_flags & IMM_FLAG;
859 /* Should be set to true if r1 is used by loadstores. */
860 bool stackprot = false;
861 TCGv_i32 t32;
862
863 /* All load/stores use ra. */
864 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
865 stackprot = true;
866 }
867
868 /* Treat the common cases first. */
869 if (!dc->type_b) {
870 if (ea) {
871 int addr_size = dc->cpu->cfg.addr_size;
872
873 if (addr_size == 32) {
874 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
875 return;
876 }
877
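            /* 64-bit extended address: ra provides the upper 32 bits,
               rb the lower 32 bits. */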
878 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
879 if (addr_size < 64) {
880 /* Mask off out of range bits. */
881 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
882 }
883 return;
884 }
885
886 /* If any of the regs is r0, set t to the value of the other reg. */
887 if (dc->ra == 0) {
888 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
889 return;
890 } else if (dc->rb == 0) {
891 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
892 return;
893 }
894
895 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
896 stackprot = true;
897 }
898
899 t32 = tcg_temp_new_i32();
900 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
901 tcg_gen_extu_i32_tl(t, t32);
902 tcg_temp_free_i32(t32);
903
904 if (stackprot) {
905 gen_helper_stackprot(cpu_env, t);
906 }
907 return;
908 }
909 /* Immediate. */
910 t32 = tcg_temp_new_i32();
911 if (!extimm) {
912 tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
913 } else {
914 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
915 }
916 tcg_gen_extu_i32_tl(t, t32);
917 tcg_temp_free_i32(t32);
918
919 if (stackprot) {
920 gen_helper_stackprot(cpu_env, t);
921 }
922 return;
923 }
924
925 static void dec_load(DisasContext *dc)
926 {
927 TCGv_i32 v;
928 TCGv addr;
929 unsigned int size;
930 bool rev = false, ex = false, ea = false;
931 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
932 MemOp mop;
933
934 mop = dc->opcode & 3;
935 size = 1 << mop;
936 if (!dc->type_b) {
937 ea = extract32(dc->ir, 7, 1);
938 rev = extract32(dc->ir, 9, 1);
939 ex = extract32(dc->ir, 10, 1);
940 }
941 mop |= MO_TE;
942 if (rev) {
943 mop ^= MO_BSWAP;
944 }
945
946 if (trap_illegal(dc, size > 4)) {
947 return;
948 }
949
950 if (trap_userspace(dc, ea)) {
951 return;
952 }
953
954 LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
955 ex ? "x" : "",
956 ea ? "ea" : "");
957
958 t_sync_flags(dc);
959 addr = tcg_temp_new();
960 compute_ldst_addr(dc, ea, addr);
961 /* Extended addressing bypasses the MMU. */
962 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
963
964 /*
965 * When doing reverse accesses we need to do two things.
966 *
967 * 1. Reverse the address wrt endianness.
968 * 2. Byteswap the data lanes on the way back into the CPU core.
969 */
970 if (rev && size != 4) {
971 /* Endian reverse the address. t is addr. */
972 switch (size) {
973 case 1:
974 {
975 tcg_gen_xori_tl(addr, addr, 3);
976 break;
977 }
978
979 case 2:
980 /* 00 -> 10
981 10 -> 00. */
982 tcg_gen_xori_tl(addr, addr, 2);
983 break;
984 default:
985 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
986 break;
987 }
988 }
989
990 /* lwx does not throw unaligned access errors, so force alignment */
991 if (ex) {
992 tcg_gen_andi_tl(addr, addr, ~3);
993 }
994
995 /* If we get a fault on a dslot, the jmpstate better be in sync. */
996 sync_jmpstate(dc);
997
998 /* Verify alignment if needed. */
999 /*
1000 * Microblaze gives MMU faults priority over faults due to
1001 * unaligned addresses. That's why we speculatively do the load
1002 * into v. If the load succeeds, we verify alignment of the
1003 * address and if that succeeds we write into the destination reg.
1004 */
1005 v = tcg_temp_new_i32();
1006 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
1007
1008 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1009 TCGv_i32 t0 = tcg_const_i32(0);
1010 TCGv_i32 treg = tcg_const_i32(dc->rd);
1011 TCGv_i32 tsize = tcg_const_i32(size - 1);
1012
1013 tcg_gen_movi_i32(cpu_pc, dc->pc);
1014 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1015
1016 tcg_temp_free_i32(t0);
1017 tcg_temp_free_i32(treg);
1018 tcg_temp_free_i32(tsize);
1019 }
1020
1021 if (ex) {
1022 tcg_gen_mov_tl(env_res_addr, addr);
1023 tcg_gen_mov_i32(env_res_val, v);
1024 }
1025 if (dc->rd) {
1026 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1027 }
1028 tcg_temp_free_i32(v);
1029
1030 if (ex) { /* lwx */
1031 /* no support for AXI exclusive so always clear C */
1032 write_carryi(dc, 0);
1033 }
1034
1035 tcg_temp_free(addr);
1036 }
1037
1038 static void dec_store(DisasContext *dc)
1039 {
1040 TCGv addr;
1041 TCGLabel *swx_skip = NULL;
1042 unsigned int size;
1043 bool rev = false, ex = false, ea = false;
1044 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1045 MemOp mop;
1046
1047 mop = dc->opcode & 3;
1048 size = 1 << mop;
1049 if (!dc->type_b) {
1050 ea = extract32(dc->ir, 7, 1);
1051 rev = extract32(dc->ir, 9, 1);
1052 ex = extract32(dc->ir, 10, 1);
1053 }
1054 mop |= MO_TE;
1055 if (rev) {
1056 mop ^= MO_BSWAP;
1057 }
1058
1059 if (trap_illegal(dc, size > 4)) {
1060 return;
1061 }
1062
1063 trap_userspace(dc, ea);
1064
1065 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1066 ex ? "x" : "",
1067 ea ? "ea" : "");
1068 t_sync_flags(dc);
1069 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1070 sync_jmpstate(dc);
1071 /* SWX needs a temp_local. */
1072 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1073 compute_ldst_addr(dc, ea, addr);
1074 /* Extended addressing bypasses the MMU. */
1075 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1076
1077 if (ex) { /* swx */
1078 TCGv_i32 tval;
1079
1080 /* swx does not throw unaligned access errors, so force alignment */
1081 tcg_gen_andi_tl(addr, addr, ~3);
1082
1083 write_carryi(dc, 1);
1084 swx_skip = gen_new_label();
1085 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1086
1087 /*
1088 * Compare the value loaded at lwx with current contents of
1089 * the reserved location.
1090 */
1091 tval = tcg_temp_new_i32();
1092
1093 tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
1094 cpu_R[dc->rd], mem_index,
1095 mop);
1096
1097 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1098 write_carryi(dc, 0);
1099 tcg_temp_free_i32(tval);
1100 }
1101
1102 if (rev && size != 4) {
1103 /* Endian reverse the address. t is addr. */
1104 switch (size) {
1105 case 1:
1106 {
1107 tcg_gen_xori_tl(addr, addr, 3);
1108 break;
1109 }
1110
1111 case 2:
1112 /* 00 -> 10
1113 10 -> 00. */
1114 /* Force addr into the temp. */
1115 tcg_gen_xori_tl(addr, addr, 2);
1116 break;
1117 default:
1118 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1119 break;
1120 }
1121 }
1122
1123 if (!ex) {
1124 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1125 }
1126
1127 /* Verify alignment if needed. */
1128 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1129 TCGv_i32 t1 = tcg_const_i32(1);
1130 TCGv_i32 treg = tcg_const_i32(dc->rd);
1131 TCGv_i32 tsize = tcg_const_i32(size - 1);
1132
1133 tcg_gen_movi_i32(cpu_pc, dc->pc);
1134 /* FIXME: if the alignment is wrong, we should restore the value
1135 * in memory. One possible way to achieve this is to probe
1136 * the MMU prior to the memaccess, that way we could put
1137 * the alignment checks in between the probe and the mem
1138 * access.
1139 */
1140 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1141
1142 tcg_temp_free_i32(t1);
1143 tcg_temp_free_i32(treg);
1144 tcg_temp_free_i32(tsize);
1145 }
1146
1147 if (ex) {
1148 gen_set_label(swx_skip);
1149 }
1150
1151 tcg_temp_free(addr);
1152 }
1153
1154 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1155 TCGv_i32 d, TCGv_i32 a)
1156 {
1157 static const int mb_to_tcg_cc[] = {
1158 [CC_EQ] = TCG_COND_EQ,
1159 [CC_NE] = TCG_COND_NE,
1160 [CC_LT] = TCG_COND_LT,
1161 [CC_LE] = TCG_COND_LE,
1162 [CC_GE] = TCG_COND_GE,
1163 [CC_GT] = TCG_COND_GT,
1164 };
1165
1166 switch (cc) {
1167 case CC_EQ:
1168 case CC_NE:
1169 case CC_LT:
1170 case CC_LE:
1171 case CC_GE:
1172 case CC_GT:
1173 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1174 break;
1175 default:
1176 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1177 break;
1178 }
1179 }
1180
1181 static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
1182 {
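    /* cpu_pc = env_btaken ? pc_true : pc_false. */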
1183 TCGv_i32 zero = tcg_const_i32(0);
1184
1185 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
1186 env_btaken, zero,
1187 pc_true, pc_false);
1188
1189 tcg_temp_free_i32(zero);
1190 }
1191
1192 static void dec_setup_dslot(DisasContext *dc)
1193 {
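    /* bimm notes whether the branch itself is a type B insn under a live
       imm prefix; the following insn executes in the delay slot. */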
1194 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1195
1196 dc->delayed_branch = 2;
1197 dc->tb_flags |= D_FLAG;
1198
1199 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1200 tcg_temp_free_i32(tmp);
1201 }
1202
1203 static void dec_bcc(DisasContext *dc)
1204 {
1205 unsigned int cc;
1206 unsigned int dslot;
1207
1208 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1209 dslot = dc->ir & (1 << 25);
1210 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1211
1212 dc->delayed_branch = 1;
1213 if (dslot) {
1214 dec_setup_dslot(dc);
1215 }
1216
1217 if (dec_alu_op_b_is_small_imm(dc)) {
1218 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1219
1220 tcg_gen_movi_i32(cpu_btarget, dc->pc + offset);
1221 dc->jmp = JMP_DIRECT_CC;
1222 dc->jmp_pc = dc->pc + offset;
1223 } else {
1224 dc->jmp = JMP_INDIRECT;
1225 tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
1226 }
1227 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
1228 }
1229
1230 static void dec_br(DisasContext *dc)
1231 {
1232 unsigned int dslot, link, abs, mbar;
1233
1234 dslot = dc->ir & (1 << 20);
1235 abs = dc->ir & (1 << 19);
1236 link = dc->ir & (1 << 18);
1237
1238 /* Memory barrier. */
1239 mbar = (dc->ir >> 16) & 31;
1240 if (mbar == 2 && dc->imm == 4) {
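        /* For mbar, the 5-bit immediate lives in the rd field. */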
1241 uint16_t mbar_imm = dc->rd;
1242
1243 LOG_DIS("mbar %d\n", mbar_imm);
1244
1245 /* Data access memory barrier. */
1246 if ((mbar_imm & 2) == 0) {
1247 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1248 }
1249
1250 /* mbar IMM & 16 decodes to sleep. */
1251 if (mbar_imm & 16) {
1252 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1253 TCGv_i32 tmp_1 = tcg_const_i32(1);
1254
1255 LOG_DIS("sleep\n");
1256
1257 if (trap_userspace(dc, true)) {
1258 /* Sleep is a privileged instruction. */
1259 return;
1260 }
1261
1262 t_sync_flags(dc);
1263 tcg_gen_st_i32(tmp_1, cpu_env,
1264 -offsetof(MicroBlazeCPU, env)
1265 +offsetof(CPUState, halted));
1266 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
1267 gen_helper_raise_exception(cpu_env, tmp_hlt);
1268 tcg_temp_free_i32(tmp_hlt);
1269 tcg_temp_free_i32(tmp_1);
1270 return;
1271 }
1272 /* Break the TB. */
1273 dc->cpustate_changed = 1;
1274 return;
1275 }
1276
1277 LOG_DIS("br%s%s%s%s imm=%x\n",
1278 abs ? "a" : "", link ? "l" : "",
1279 dc->type_b ? "i" : "", dslot ? "d" : "",
1280 dc->imm);
1281
1282 dc->delayed_branch = 1;
1283 if (dslot) {
1284 dec_setup_dslot(dc);
1285 }
1286 if (link && dc->rd)
1287 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1288
1289 dc->jmp = JMP_INDIRECT;
1290 if (abs) {
1291 tcg_gen_movi_i32(env_btaken, 1);
1292 tcg_gen_mov_i32(cpu_btarget, *(dec_alu_op_b(dc)));
1293 if (link && !dslot) {
1294 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1295 t_gen_raise_exception(dc, EXCP_BREAK);
1296 if (dc->imm == 0) {
1297 if (trap_userspace(dc, true)) {
1298 return;
1299 }
1300
1301 t_gen_raise_exception(dc, EXCP_DEBUG);
1302 }
1303 }
1304 } else {
1305 if (dec_alu_op_b_is_small_imm(dc)) {
1306 dc->jmp = JMP_DIRECT;
1307 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1308 } else {
1309 tcg_gen_movi_i32(env_btaken, 1);
1310 tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
1311 }
1312 }
1313 }
1314
1315 static inline void do_rti(DisasContext *dc)
1316 {
1317 TCGv_i32 t0, t1;
1318 t0 = tcg_temp_new_i32();
1319 t1 = tcg_temp_new_i32();
1320 tcg_gen_mov_i32(t1, cpu_msr);
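    /* Return from interrupt: set MSR[IE] and restore MSR[UM/VM] from the
       saved UMS/VMS copies one bit position above. */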
1321 tcg_gen_shri_i32(t0, t1, 1);
1322 tcg_gen_ori_i32(t1, t1, MSR_IE);
1323 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1324
1325 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1326 tcg_gen_or_i32(t1, t1, t0);
1327 msr_write(dc, t1);
1328 tcg_temp_free_i32(t1);
1329 tcg_temp_free_i32(t0);
1330 dc->tb_flags &= ~DRTI_FLAG;
1331 }
1332
1333 static inline void do_rtb(DisasContext *dc)
1334 {
1335 TCGv_i32 t0, t1;
1336 t0 = tcg_temp_new_i32();
1337 t1 = tcg_temp_new_i32();
1338 tcg_gen_mov_i32(t1, cpu_msr);
1339 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1340 tcg_gen_shri_i32(t0, t1, 1);
1341 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1342
1343 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1344 tcg_gen_or_i32(t1, t1, t0);
1345 msr_write(dc, t1);
1346 tcg_temp_free_i32(t1);
1347 tcg_temp_free_i32(t0);
1348 dc->tb_flags &= ~DRTB_FLAG;
1349 }
1350
1351 static inline void do_rte(DisasContext *dc)
1352 {
1353 TCGv_i32 t0, t1;
1354 t0 = tcg_temp_new_i32();
1355 t1 = tcg_temp_new_i32();
1356
1357 tcg_gen_mov_i32(t1, cpu_msr);
1358 tcg_gen_ori_i32(t1, t1, MSR_EE);
1359 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1360 tcg_gen_shri_i32(t0, t1, 1);
1361 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1362
1363 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1364 tcg_gen_or_i32(t1, t1, t0);
1365 msr_write(dc, t1);
1366 tcg_temp_free_i32(t1);
1367 tcg_temp_free_i32(t0);
1368 dc->tb_flags &= ~DRTE_FLAG;
1369 }
1370
1371 static void dec_rts(DisasContext *dc)
1372 {
1373 unsigned int b_bit, i_bit, e_bit;
1374
1375 i_bit = dc->ir & (1 << 21);
1376 b_bit = dc->ir & (1 << 22);
1377 e_bit = dc->ir & (1 << 23);
1378
1379 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1380 return;
1381 }
1382
1383 dec_setup_dslot(dc);
1384
1385 if (i_bit) {
1386 LOG_DIS("rtid ir=%x\n", dc->ir);
1387 dc->tb_flags |= DRTI_FLAG;
1388 } else if (b_bit) {
1389 LOG_DIS("rtbd ir=%x\n", dc->ir);
1390 dc->tb_flags |= DRTB_FLAG;
1391 } else if (e_bit) {
1392 LOG_DIS("rted ir=%x\n", dc->ir);
1393 dc->tb_flags |= DRTE_FLAG;
1394 } else
1395 LOG_DIS("rts ir=%x\n", dc->ir);
1396
1397 dc->jmp = JMP_INDIRECT;
1398 tcg_gen_movi_i32(env_btaken, 1);
1399 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
1400 }
1401
1402 static int dec_check_fpuv2(DisasContext *dc)
1403 {
1404 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1405 tcg_gen_movi_i32(cpu_esr, ESR_EC_FPU);
1406 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1407 }
1408 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1409 }
1410
1411 static void dec_fpu(DisasContext *dc)
1412 {
1413 unsigned int fpu_insn;
1414
1415 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1416 return;
1417 }
1418
1419 fpu_insn = (dc->ir >> 7) & 7;
1420
1421 switch (fpu_insn) {
1422 case 0:
1423 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1424 cpu_R[dc->rb]);
1425 break;
1426
1427 case 1:
1428 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1429 cpu_R[dc->rb]);
1430 break;
1431
1432 case 2:
1433 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1434 cpu_R[dc->rb]);
1435 break;
1436
1437 case 3:
1438 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1439 cpu_R[dc->rb]);
1440 break;
1441
1442 case 4:
1443 switch ((dc->ir >> 4) & 7) {
1444 case 0:
1445 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1446 cpu_R[dc->ra], cpu_R[dc->rb]);
1447 break;
1448 case 1:
1449 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1450 cpu_R[dc->ra], cpu_R[dc->rb]);
1451 break;
1452 case 2:
1453 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1454 cpu_R[dc->ra], cpu_R[dc->rb]);
1455 break;
1456 case 3:
1457 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1458 cpu_R[dc->ra], cpu_R[dc->rb]);
1459 break;
1460 case 4:
1461 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1462 cpu_R[dc->ra], cpu_R[dc->rb]);
1463 break;
1464 case 5:
1465 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1466 cpu_R[dc->ra], cpu_R[dc->rb]);
1467 break;
1468 case 6:
1469 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1470 cpu_R[dc->ra], cpu_R[dc->rb]);
1471 break;
1472 default:
1473 qemu_log_mask(LOG_UNIMP,
1474 "unimplemented fcmp fpu_insn=%x pc=%x"
1475 " opc=%x\n",
1476 fpu_insn, dc->pc, dc->opcode);
1477 dc->abort_at_next_insn = 1;
1478 break;
1479 }
1480 break;
1481
1482 case 5:
1483 if (!dec_check_fpuv2(dc)) {
1484 return;
1485 }
1486 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1487 break;
1488
1489 case 6:
1490 if (!dec_check_fpuv2(dc)) {
1491 return;
1492 }
1493 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1494 break;
1495
1496 case 7:
1497 if (!dec_check_fpuv2(dc)) {
1498 return;
1499 }
1500 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1501 break;
1502
1503 default:
1504 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1505 " opc=%x\n",
1506 fpu_insn, dc->pc, dc->opcode);
1507 dc->abort_at_next_insn = 1;
1508 break;
1509 }
1510 }
1511
1512 static void dec_null(DisasContext *dc)
1513 {
1514 if (trap_illegal(dc, true)) {
1515 return;
1516 }
1517 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1518 dc->abort_at_next_insn = 1;
1519 }
1520
1521 /* Insns connected to FSL or AXI stream attached devices. */
1522 static void dec_stream(DisasContext *dc)
1523 {
1524 TCGv_i32 t_id, t_ctrl;
1525 int ctrl;
1526
1527 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1528 dc->type_b ? "" : "d", dc->imm);
1529
1530 if (trap_userspace(dc, true)) {
1531 return;
1532 }
1533
1534 t_id = tcg_temp_new_i32();
1535 if (dc->type_b) {
1536 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1537 ctrl = dc->imm >> 10;
1538 } else {
1539 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1540 ctrl = dc->imm >> 5;
1541 }
1542
1543 t_ctrl = tcg_const_i32(ctrl);
1544
1545 if (dc->rd == 0) {
1546 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1547 } else {
1548 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1549 }
1550 tcg_temp_free_i32(t_id);
1551 tcg_temp_free_i32(t_ctrl);
1552 }
1553
1554 static struct decoder_info {
1555 struct {
1556 uint32_t bits;
1557 uint32_t mask;
1558 };
1559 void (*dec)(DisasContext *dc);
1560 } decinfo[] = {
1561 {DEC_ADD, dec_add},
1562 {DEC_SUB, dec_sub},
1563 {DEC_AND, dec_and},
1564 {DEC_XOR, dec_xor},
1565 {DEC_OR, dec_or},
1566 {DEC_BIT, dec_bit},
1567 {DEC_BARREL, dec_barrel},
1568 {DEC_LD, dec_load},
1569 {DEC_ST, dec_store},
1570 {DEC_IMM, dec_imm},
1571 {DEC_BR, dec_br},
1572 {DEC_BCC, dec_bcc},
1573 {DEC_RTS, dec_rts},
1574 {DEC_FPU, dec_fpu},
1575 {DEC_MUL, dec_mul},
1576 {DEC_DIV, dec_div},
1577 {DEC_MSR, dec_msr},
1578 {DEC_STREAM, dec_stream},
1579 {{0, 0}, dec_null}
1580 };
1581
1582 static inline void decode(DisasContext *dc, uint32_t ir)
1583 {
1584 int i;
1585
1586 dc->ir = ir;
1587 LOG_DIS("%8.8x\t", dc->ir);
1588
1589 if (ir == 0) {
1590 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
1591 /* Don't decode nop/zero instructions any further. */
1592 return;
1593 }
1594
1595 /* Bit 2 (bit 29 in LSB-first numbering) selects the insn type: set for type B (immediate-form) insns. */
1596 dc->type_b = ir & (1 << 29);
1597
1598 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1599 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1600 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1601 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1602 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1603
1604 /* Dispatch to the matching decoder via the decinfo table. */
1605 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1606 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1607 decinfo[i].dec(dc);
1608 break;
1609 }
1610 }
1611 }
1612
1613 /* generate intermediate code for basic block 'tb'. */
1614 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1615 {
1616 CPUMBState *env = cs->env_ptr;
1617 MicroBlazeCPU *cpu = env_archcpu(env);
1618 uint32_t pc_start;
1619 struct DisasContext ctx;
1620 struct DisasContext *dc = &ctx;
1621 uint32_t page_start, org_flags;
1622 uint32_t npc;
1623 int num_insns;
1624
1625 pc_start = tb->pc;
1626 dc->cpu = cpu;
1627 dc->tb = tb;
1628 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1629
1630 dc->is_jmp = DISAS_NEXT;
1631 dc->jmp = 0;
1632 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1633 if (dc->delayed_branch) {
1634 dc->jmp = JMP_INDIRECT;
1635 }
1636 dc->pc = pc_start;
1637 dc->singlestep_enabled = cs->singlestep_enabled;
1638 dc->cpustate_changed = 0;
1639 dc->abort_at_next_insn = 0;
1640
1641 if (pc_start & 3) {
1642 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1643 }
1644
1645 page_start = pc_start & TARGET_PAGE_MASK;
1646 num_insns = 0;
1647
1648 gen_tb_start(tb);
1649 do
1650 {
1651 tcg_gen_insn_start(dc->pc);
1652 num_insns++;
1653
1654 #if SIM_COMPAT
1655 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1656 tcg_gen_movi_i32(cpu_pc, dc->pc);
1657 gen_helper_debug();
1658 }
1659 #endif
1660
1661 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1662 t_gen_raise_exception(dc, EXCP_DEBUG);
1663 dc->is_jmp = DISAS_UPDATE;
1664 /* The address covered by the breakpoint must be included in
1665 [tb->pc, tb->pc + tb->size) in order for it to be
1666 properly cleared -- thus we increment the PC here so that
1667 the logic setting tb->size below does the right thing. */
1668 dc->pc += 4;
1669 break;
1670 }
1671
1672 /* Pretty disas. */
1673 LOG_DIS("%8.8x:\t", dc->pc);
1674
1675 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1676 gen_io_start();
1677 }
1678
1679 dc->clear_imm = 1;
1680 decode(dc, cpu_ldl_code(env, dc->pc));
1681 if (dc->clear_imm)
1682 dc->tb_flags &= ~IMM_FLAG;
1683 dc->pc += 4;
1684
1685 if (dc->delayed_branch) {
1686 dc->delayed_branch--;
1687 if (!dc->delayed_branch) {
1688 if (dc->tb_flags & DRTI_FLAG)
1689 do_rti(dc);
1690 if (dc->tb_flags & DRTB_FLAG)
1691 do_rtb(dc);
1692 if (dc->tb_flags & DRTE_FLAG)
1693 do_rte(dc);
1694 /* Clear the delay slot flag. */
1695 dc->tb_flags &= ~D_FLAG;
1696 /* If it is a direct jump, try direct chaining. */
1697 if (dc->jmp == JMP_INDIRECT) {
1698 TCGv_i32 tmp_pc = tcg_const_i32(dc->pc);
1699 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1700 tcg_temp_free_i32(tmp_pc);
1701 dc->is_jmp = DISAS_JUMP;
1702 } else if (dc->jmp == JMP_DIRECT) {
1703 t_sync_flags(dc);
1704 gen_goto_tb(dc, 0, dc->jmp_pc);
1705 dc->is_jmp = DISAS_TB_JUMP;
1706 } else if (dc->jmp == JMP_DIRECT_CC) {
1707 TCGLabel *l1 = gen_new_label();
1708 t_sync_flags(dc);
1709 /* Conditional jmp. */
1710 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
1711 gen_goto_tb(dc, 1, dc->pc);
1712 gen_set_label(l1);
1713 gen_goto_tb(dc, 0, dc->jmp_pc);
1714
1715 dc->is_jmp = DISAS_TB_JUMP;
1716 }
1717 break;
1718 }
1719 }
1720 if (cs->singlestep_enabled) {
1721 break;
1722 }
1723 } while (!dc->is_jmp && !dc->cpustate_changed
1724 && !tcg_op_buf_full()
1725 && !singlestep
1726 && (dc->pc - page_start < TARGET_PAGE_SIZE)
1727 && num_insns < max_insns);
1728
1729 npc = dc->pc;
1730 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1731 if (dc->tb_flags & D_FLAG) {
1732 dc->is_jmp = DISAS_UPDATE;
1733 tcg_gen_movi_i32(cpu_pc, npc);
1734 sync_jmpstate(dc);
1735 } else
1736 npc = dc->jmp_pc;
1737 }
1738
1739 /* Force an update if the per-tb cpu state has changed. */
1740 if (dc->is_jmp == DISAS_NEXT
1741 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1742 dc->is_jmp = DISAS_UPDATE;
1743 tcg_gen_movi_i32(cpu_pc, npc);
1744 }
1745 t_sync_flags(dc);
1746
1747 if (unlikely(cs->singlestep_enabled)) {
1748 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1749
1750 if (dc->is_jmp != DISAS_JUMP) {
1751 tcg_gen_movi_i32(cpu_pc, npc);
1752 }
1753 gen_helper_raise_exception(cpu_env, tmp);
1754 tcg_temp_free_i32(tmp);
1755 } else {
1756 switch(dc->is_jmp) {
1757 case DISAS_NEXT:
1758 gen_goto_tb(dc, 1, npc);
1759 break;
1760 default:
1761 case DISAS_JUMP:
1762 case DISAS_UPDATE:
1763 /* indicate that the hash table must be used
1764 to find the next TB */
1765 tcg_gen_exit_tb(NULL, 0);
1766 break;
1767 case DISAS_TB_JUMP:
1768 /* nothing more to generate */
1769 break;
1770 }
1771 }
1772 gen_tb_end(tb, num_insns);
1773
1774 tb->size = dc->pc - pc_start;
1775 tb->icount = num_insns;
1776
1777 #ifdef DEBUG_DISAS
1778 #if !SIM_COMPAT
1779 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1780 && qemu_log_in_addr_range(pc_start)) {
1781 FILE *logfile = qemu_log_lock();
1782 qemu_log("--------------\n");
1783 log_target_disas(cs, pc_start, dc->pc - pc_start);
1784 qemu_log_unlock(logfile);
1785 }
1786 #endif
1787 #endif
1788 assert(!dc->abort_at_next_insn);
1789 }
1790
1791 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1792 {
1793 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1794 CPUMBState *env = &cpu->env;
1795 int i;
1796
1797 if (!env) {
1798 return;
1799 }
1800
1801 qemu_fprintf(f, "IN: PC=%x %s\n",
1802 env->pc, lookup_symbol(env->pc));
1803 qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
1804 "debug=%x imm=%x iflags=%x fsr=%x rbtr=%x\n",
1805 env->msr, env->esr, env->ear,
1806 env->debug, env->imm, env->iflags, env->fsr,
1807 env->btr);
1808 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1809 env->btaken, env->btarget,
1810 (env->msr & MSR_UM) ? "user" : "kernel",
1811 (env->msr & MSR_UMS) ? "user" : "kernel",
1812 (bool)(env->msr & MSR_EIP),
1813 (bool)(env->msr & MSR_IE));
1814 for (i = 0; i < 12; i++) {
1815 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1816 if ((i + 1) % 4 == 0) {
1817 qemu_fprintf(f, "\n");
1818 }
1819 }
1820
1821 /* Registers that aren't modeled are reported as 0 */
1822 qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1823 "rtlblo=0 rtlbhi=0\n", env->edr);
1824 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1825 for (i = 0; i < 32; i++) {
1826 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1827 if ((i + 1) % 4 == 0)
1828 qemu_fprintf(f, "\n");
1829 }
1830 qemu_fprintf(f, "\n\n");
1831 }
1832
1833 void mb_tcg_init(void)
1834 {
1835 int i;
1836
1837 env_debug = tcg_global_mem_new_i32(cpu_env,
1838 offsetof(CPUMBState, debug),
1839 "debug0");
1840 env_iflags = tcg_global_mem_new_i32(cpu_env,
1841 offsetof(CPUMBState, iflags),
1842 "iflags");
1843 env_imm = tcg_global_mem_new_i32(cpu_env,
1844 offsetof(CPUMBState, imm),
1845 "imm");
1846 cpu_btarget = tcg_global_mem_new_i32(cpu_env,
1847 offsetof(CPUMBState, btarget),
1848 "btarget");
1849 env_btaken = tcg_global_mem_new_i32(cpu_env,
1850 offsetof(CPUMBState, btaken),
1851 "btaken");
1852 env_res_addr = tcg_global_mem_new(cpu_env,
1853 offsetof(CPUMBState, res_addr),
1854 "res_addr");
1855 env_res_val = tcg_global_mem_new_i32(cpu_env,
1856 offsetof(CPUMBState, res_val),
1857 "res_val");
1858 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1859 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
1860 offsetof(CPUMBState, regs[i]),
1861 regnames[i]);
1862 }
1863
1864 cpu_pc =
1865 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, pc), "rpc");
1866 cpu_msr =
1867 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, msr), "rmsr");
1868 cpu_ear =
1869 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, ear), "rear");
1870 cpu_esr =
1871 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, esr), "resr");
1872 }
1873
1874 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1875 target_ulong *data)
1876 {
1877 env->pc = data[0];
1878 }