cpu: Move breakpoints field from CPU_COMMON to CPUState
[qemu.git] / target-microblaze / translate.c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "helper.h"
25 #include "microblaze-decode.h"
26
27 #define GEN_HELPER 1
28 #include "helper.h"
29
30 #define SIM_COMPAT 0
31 #define DISAS_GNU 1
32 #define DISAS_MB 1
33 #if DISAS_MB && !SIM_COMPAT
34 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
35 #else
36 # define LOG_DIS(...) do { } while (0)
37 #endif
38
39 #define D(x)
40
/* Extract bits [start..end] (inclusive, end >= start) of src as an
   unsigned value.  */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

/* TCG globals mapping fields of CPUMBState; allocated once at init.  */
static TCGv env_debug;          /* debug scratch register */
static TCGv_ptr cpu_env;        /* pointer to CPUMBState */
static TCGv cpu_R[32];          /* general purpose registers r0..r31 */
static TCGv cpu_SR[18];         /* special registers (PC, MSR, EAR, ...) */
static TCGv env_imm;            /* imm prefix accumulator (see dec_imm) */
static TCGv env_btaken;         /* branch-taken flag for conditional jumps */
static TCGv env_btarget;        /* branch target for indirect jumps */
static TCGv env_iflags;         /* runtime copy of the tb-dependent flags */
static TCGv env_res_addr;       /* lwx/swx reservation address */
static TCGv env_res_val;        /* lwx/swx reservation value */
54
55 #include "exec/gen-icount.h"
56
57 /* This is the state at translation time. */
typedef struct DisasContext {
    CPUMBState *env;
    target_ulong pc;            /* address of the insn being translated */

    /* Decoder: fields extracted from the current instruction word.  */
    int type_b;                 /* 1 if operand b is an immediate */
    uint32_t ir;                /* raw instruction word */
    uint8_t opcode;
    uint8_t rd, ra, rb;         /* dest and source register numbers */
    uint16_t imm;               /* 16-bit immediate field */

    unsigned int cpustate_changed;  /* force end of TB (MSR etc. changed) */
    unsigned int delayed_branch;    /* countdown: 2 = dslot pending, 1 = now */
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;     /* drop IMM_FLAG after this insn */
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1         /* unconditional, target known at xlate time */
#define JMP_DIRECT_CC 2         /* conditional, target known at xlate time */
#define JMP_INDIRECT  3         /* target only known at runtime */
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
87
/* Printable names for the 32 GPRs, indexed by register number
   (used by debug/trace dumps).  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

/* Printable names for the special registers, matching cpu_SR[] indices.  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
102
103 /* Sign extend at translation time. */
/* Sign extend at translation time.
 * 'width' is the bit position of the sign bit: the result replicates
 * bit 'width' of val into all higher bits.  E.g. sign_extend(0x8000, 15)
 * yields -32768.
 */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    unsigned int shift = 31 - width;

    /* Move the sign bit up to bit 31, then arithmetic-shift it back
       down so it is replicated across the upper bits.  */
    return ((int)(val << shift)) >> shift;
}
115
/* Flush the translator's view of the tb-dependent flags (IMM_FLAG,
   D_FLAG, ...) into the runtime env_iflags global.  Cached: only emits
   a move when the translator-side copy has diverged.  */
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
124
/* Emit code to raise exception 'index' at runtime.  Syncs iflags and PC
   first so the exception handler sees consistent state, then marks the
   TB as finished (the raise helper does not return to the TB).  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
135
/* Emit a jump to 'dest'.  If the destination lies on the same guest page
   as this TB, use goto_tb so the TBs can be chained directly; otherwise
   fall back to a plain exit with PC updated.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        /* The low bits of the tb pointer encode which of the two exit
           slots (n) was taken, for TB chaining.  */
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
149
/* Copy the carry flag (bit 31 of MSR, i.e. MSR_CC) into d as 0 or 1.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
154
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    /* Replicate bit 0 of v across all 32 bits, then mask down to the two
       carry positions (MSR_C and the MSR_CC copy).  */
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    /* Clear the old carry bits and OR in the new ones.  */
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
170
/* Set the MSR carry bits to a constant 0 or 1.  */
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}
178
179 /* True if ALU operand b is a small immediate that may deserve
180 faster treatment. */
181 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
182 {
183 /* Immediate insn without the imm prefix ? */
184 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
185 }
186
/* Return a pointer to the TCGv holding ALU operand b.  For register
   forms this is rb directly; for immediate forms the (possibly
   imm-prefixed) value is materialized into env_imm first.  */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            /* imm prefix active: env_imm already holds the high 16 bits
               (see dec_imm); OR in the low 16.  */
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            /* No prefix: sign-extend the 16-bit immediate.  */
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
198
/* Decode add/addc/addk/addkc (and immediate forms).  Opcode bit 2 (k)
   means "keep carry" (MSR.C not updated); bit 1 (c) means "add carry
   in".  rd == r0 makes the arithmetic a nop, but MSR.C is still
   updated when k is clear.  */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        /* Compute carry-out with the helper BEFORE writing rd, since rd
           may alias ra or rb.  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: result is discarded but the carry is still set.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
252
/* Decode rsub/rsubc/rsubk/rsubkc and cmp/cmpu.  MicroBlaze subtract is
   reversed: d = b - a, computed as d = b + ~a + carry-in (carry-in
   defaults to 1).  cmp/cmpu are encoded as rsubk with imm bit 0 set.  */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;            /* unsigned compare variant */
    k = dc->opcode & 4;         /* keep carry */
    c = dc->opcode & 2;         /* use carry in */
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        /* cmp/cmpu: signed/unsigned compare via helper; nop if rd==r0.  */
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            /* Reversed operands: rd = op_b - ra.  */
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        /* Plain rsub: borrow-free subtract needs carry-in = 1.  */
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* Carry-out computed before rd is written (rd may alias rb).  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag is produced.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
323
324 static void dec_pattern(DisasContext *dc)
325 {
326 unsigned int mode;
327 int l1;
328
329 if ((dc->tb_flags & MSR_EE_FLAG)
330 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
331 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
332 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
333 t_gen_raise_exception(dc, EXCP_HW_EXCP);
334 }
335
336 mode = dc->opcode & 3;
337 switch (mode) {
338 case 0:
339 /* pcmpbf. */
340 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
341 if (dc->rd)
342 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
343 break;
344 case 2:
345 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
346 if (dc->rd) {
347 TCGv t0 = tcg_temp_local_new();
348 l1 = gen_new_label();
349 tcg_gen_movi_tl(t0, 1);
350 tcg_gen_brcond_tl(TCG_COND_EQ,
351 cpu_R[dc->ra], cpu_R[dc->rb], l1);
352 tcg_gen_movi_tl(t0, 0);
353 gen_set_label(l1);
354 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
355 tcg_temp_free(t0);
356 }
357 break;
358 case 3:
359 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
360 l1 = gen_new_label();
361 if (dc->rd) {
362 TCGv t0 = tcg_temp_local_new();
363 tcg_gen_movi_tl(t0, 1);
364 tcg_gen_brcond_tl(TCG_COND_NE,
365 cpu_R[dc->ra], cpu_R[dc->rb], l1);
366 tcg_gen_movi_tl(t0, 0);
367 gen_set_label(l1);
368 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
369 tcg_temp_free(t0);
370 }
371 break;
372 default:
373 cpu_abort(dc->env,
374 "unsupported pattern insn opcode=%x\n", dc->opcode);
375 break;
376 }
377 }
378
379 static void dec_and(DisasContext *dc)
380 {
381 unsigned int not;
382
383 if (!dc->type_b && (dc->imm & (1 << 10))) {
384 dec_pattern(dc);
385 return;
386 }
387
388 not = dc->opcode & (1 << 1);
389 LOG_DIS("and%s\n", not ? "n" : "");
390
391 if (!dc->rd)
392 return;
393
394 if (not) {
395 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
396 } else
397 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
398 }
399
400 static void dec_or(DisasContext *dc)
401 {
402 if (!dc->type_b && (dc->imm & (1 << 10))) {
403 dec_pattern(dc);
404 return;
405 }
406
407 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
408 if (dc->rd)
409 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410 }
411
412 static void dec_xor(DisasContext *dc)
413 {
414 if (!dc->type_b && (dc->imm & (1 << 10))) {
415 dec_pattern(dc);
416 return;
417 }
418
419 LOG_DIS("xor r%d\n", dc->rd);
420 if (dc->rd)
421 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
422 }
423
/* Copy the current MSR into d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
428
429 static inline void msr_write(DisasContext *dc, TCGv v)
430 {
431 TCGv t;
432
433 t = tcg_temp_new();
434 dc->cpustate_changed = 1;
435 /* PVR bit is not writable. */
436 tcg_gen_andi_tl(t, v, ~MSR_PVR);
437 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
438 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
439 tcg_temp_free(t);
440 }
441
/* Decode the MSR/special-register access insns: msrclr/msrset (imm bit
   15 clear) and mts/mfs (imm bit 15 set).  Handles privilege checks,
   the MMU register window, and the PVR read window.  */
static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);     /* special register number */
    to = dc->imm & (1 << 14);           /* set: mts (write) vs mfs (read) */
    /* Force immediate-operand decoding for dec_alu_op_b below.  */
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop???  */
            return;
        }

        /* In user mode only the carry bit (imm 4) or a plain read
           (imm 0) are allowed; anything else is privileged.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the OLD MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* MSR changed: end the TB at the next insn.  */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    /* mts is privileged.  */
    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        /* mts: write ra into the selected special register.  */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                /* Writes to rpc are ignored.  */
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                /* Only the low 5 FSR bits are writable.  */
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(dc->env, "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        /* mfs: read the selected special register into rd.  */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
             case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            /* 0x2000-0x200c: the Processor Version Register window.  */
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* Keep the architectural invariant that r0 reads as zero.  */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
598
/* 64-bit signed mul, lower result in d and upper in d2.
   Widens both operands to 64 bits, multiplies, and splits the product
   back into two 32-bit halves.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Sign-extend operands to 64 bits.  */
    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
618
/* 64-bit unsigned muls, lower result in d and upper in d2.
   Same structure as t_gen_muls but with zero-extended operands.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Zero-extend operands to 64 bits.  */
    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
638
/* Multiplier unit.  Decodes mul/mulh/mulhsu/mulhu and muli.  Raises an
   illegal-opcode exception if the core has no hardware multiplier.  The
   unused half of the 64-bit product lands in a scratch temp.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop???  */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            /* mulh: rd gets the HIGH half (second output).  */
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            /* NOTE(review): mulhsu uses the fully-signed t_gen_muls here,
               not a signed*unsigned widen — confirm against hardware.  */
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}
694
/* Div unit.  Decodes idiv/idivu (reversed operands: rd = op_b / ra).
   NOTE(review): unlike dec_mul/dec_barrel, this check is not gated on
   MSR_EE_FLAG and does not return after raising — confirm intended.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;            /* unsigned divide */
    LOG_DIS("div\n");

    if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* Restore the r0-reads-as-zero invariant if the helper wrote r0.  */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
718
/* Barrel shifter: bsll/bsra/bsrl (and immediate forms).  imm bit 10 (s)
   selects shift left; bit 9 (t) selects arithmetic vs logical right.
   Raises illegal-opcode if the core has no barrel shifter.  Shift
   amounts are taken modulo 32.  */
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    /* Mask the shift amount to 0..31.  */
    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    /* NOTE(review): t0 is not freed here — possible temp leak; and rd==0
       is written directly — confirm r0 handling against callers.  */
}
752
/* Decode the "bit field" group: shifts-by-one (src/srl/sra), sign
   extensions (sext8/sext16), cache ops (wdc/wic), clz and byte/halfword
   swaps.  Sub-op selected by the low 9 bits of the insn word.  */
static void dec_bit(DisasContext *dc)
{
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src: shift right through carry.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            /* Capture old carry (MSR bit 31) before it is overwritten.  */
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                /* Old carry becomes the new bit 31.  */
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl (0x41, logical) / sra (0x1, arithmetic).  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry.  Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            /* sext8.  */
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            /* sext16.  */
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  Data-cache ops are nops here, but privileged.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  Instruction-cache op, also a privileged nop.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz — illegal if pattern-compare insns are not configured.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /*swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            /* Swapping the two halfwords == rotate by 16.  */
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            /* NOTE(review): "oc" is printed from dc->pc, not an opcode
               field — looks like a stale format string; confirm.  */
            cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
847
/* Materialize a translation-time direct branch into the runtime branch
   state (env_btaken/env_btarget), downgrading it to JMP_INDIRECT.  Used
   before ops that may fault (loads/stores) so an exception taken in a
   delay slot sees consistent branch state.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional branches are always taken.  */
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
858
/* imm prefix: load the 16-bit immediate into the high half of env_imm
   and set IMM_FLAG so the next type-b insn ORs its own 16 bits into the
   low half (see dec_alu_op_b).  clear_imm = 0 keeps the flag alive
   across exactly this one insn.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
866
/* Compute the effective address for a load/store.  Returns either a
   pointer straight into cpu_R[] (no temp allocated) or &*t after
   allocating a temp into *t — the caller must free t iff the returned
   pointer equals t.  Emits a stack-protection check when r1 (the stack
   pointer) participates in the address.  */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        /* Zero offset: the base register itself is the address.  */
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        /* imm prefix pending: let dec_alu_op_b assemble the 32 bits.  */
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
917
/* Decode loads: lbu/lhu/lw (+i), reversed-endian (r) and exclusive (x)
   variants.  Does the access speculatively, then verifies alignment, so
   MMU faults take priority over alignment faults.  */
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;       /* 0/1/2 -> byte/half/word */
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    /* NOTE(review): size > 4 looks unreachable given size = 1 << (op & 3)
       yields at most 8 only for mop==3 which is not decoded — confirm.  */
    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                /* Replace the low 2 address bits with 3 - bits.  */
                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                /* NOTE(review): env_imm is clobbered with the munged
                   address here — purpose unclear, confirm needed.  */
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses.  That's why we speculatively do the load
     * into v.  If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(dc->env), mop);

    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        /* Record the reservation for a later swx.  */
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    /* Only free t if compute_ldst_addr (or we) allocated it.  */
    if (addr == &t)
        tcg_temp_free(t);
}
1046
/* Decode stores: sb/sh/sw (+i), reversed-endian (r) and conditional
   exclusive (swx) variants.  swx only stores if the reservation set up
   by lwx still matches both address and value; MSR.C reports failure.  */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;       /* 0/1/2 -> byte/half/word */
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    /* NOTE(review): size > 4 looks unreachable here, as in dec_load.  */
    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    /* Local temp: survives the brconds below.  */
    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        /* Assume failure (C=1); cleared below on success.  */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic.  For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(dc->env), MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                /* Replace the low 2 address bits with 3 - bits.  */
                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                /* NOTE(review): env_imm clobbered here as in dec_load —
                   purpose unclear, confirm needed.  */
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(dc->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory.  One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    /* Only free t if compute_ldst_addr (or we) allocated it.  */
    if (addr == &t)
        tcg_temp_free(t);
}
1172
/* Emit d = (a <cc> b) as 0/1 for the branch condition code cc
   (signed comparisons).  Aborts on an unknown code.  */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
            break;
    }
}
1200
/* Emit PC = env_btaken ? pc_true : pc_false.  Written as an
   unconditional move of pc_false overridden by pc_true when taken.  */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
1212
/* Decode conditional branches (beq/bne/blt/... and delay-slot 'd'
   forms).  The condition tests ra against zero; env_btaken records the
   outcome and env_btarget the destination.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Branch executes after one delay-slot insn.  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether the dslot insn is imm-prefixed, for exception
           restart logic.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target known at translation time: allow direct-jump chaining.  */
        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    /* btaken = (ra <cc> 0).  */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1243
/* Decode unconditional branches (br/bra/brl/brld/brk and the mbar/sleep
 * encodings that share the opcode space).  Sets up the delayed-branch
 * machinery and, for absolute-and-link forms, the brk/debug traps.
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(dc->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier. */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep. */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            /* Set cs->halted via the env-relative offset, then raise
             * EXCP_HLT to stop execution of this vCPU.  */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB. */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form; remember whether the slot insn has an imm
         * prefix (see dec_bcc).  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki rd, 8/0x18 encodes the software break insn.  */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki rd, 0 is privileged; user mode traps when
                 * hardware exceptions are enabled.  */
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* Relative branch with a translate-time-constant offset;
             * allow direct TB chaining.  */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1322
/* Return from interrupt: set MSR_IE and restore UM/VM from the saved
 * UMS/VMS copies (which sit one bit above UM/VM in the MSR).
 */
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    /* t0 = saved UMS/VMS shifted down into the UM/VM positions.  */
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    /* Merge the restored bits into MSR and write it back.  */
    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
1339
/* Return from break: clear MSR_BIP and restore UM/VM from the saved
 * UMS/VMS copies, mirroring do_rti.
 */
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    /* t0 = saved UMS/VMS shifted down into the UM/VM positions.  */
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
1356
/* Return from hardware exception: set MSR_EE, clear MSR_EIP and restore
 * UM/VM from the saved UMS/VMS copies, mirroring do_rti/do_rtb.
 */
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    /* t0 = saved UMS/VMS shifted down into the UM/VM positions.  */
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
1375
/* Decode rtsd/rtid/rtbd/rted.  All are delay-slot returns; the i/b/e
 * variants additionally schedule an MSR restore (do_rti/do_rtb/do_rte)
 * after the slot insn, and are privileged in user mode.
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* Always a delay-slot insn; record the imm-prefix state of the
     * slot insn (see dec_bcc).  */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Return target = rA + operand b, taken unconditionally.  */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1421
1422 static int dec_check_fpuv2(DisasContext *dc)
1423 {
1424 int r;
1425
1426 r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
1427
1428 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1429 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1430 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1431 }
1432 return r;
1433 }
1434
/* Decode floating point insns.  Without an FPU in the PVR (and with
 * illegal-opcode exceptions enabled) every FPU insn traps; otherwise
 * the sub-opcode in bits 7..9 selects the operation and, for fcmp,
 * bits 4..6 select the comparison.
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            /* fadd: rd = ra + rb  */
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            /* frsub: rd = rb - ra  */
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            /* fmul: rd = ra * rb  */
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            /* fdiv: rd = rb / ra  */
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            /* fcmp family; comparison selected by bits 4..6.  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            /* flt (int -> float) requires FPU v2.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            /* fint (float -> int) requires FPU v2.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            /* fsqrt requires FPU v2.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1539
1540 static void dec_null(DisasContext *dc)
1541 {
1542 if ((dc->tb_flags & MSR_EE_FLAG)
1543 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1544 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1545 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1546 return;
1547 }
1548 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1549 dc->abort_at_next_insn = 1;
1550 }
1551
/* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(dc->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    /* Stream insns are privileged; user mode traps when hardware
     * exceptions are enabled.  */
    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        /* Immediate form: port id in the low 4 bits of imm, control
         * bits above bit 10.  */
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        /* Dynamic form: port id taken from rB at run time.  */
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    /* rd == 0 encodes put (write rA to the stream), otherwise get.  */
    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
1587
/* Instruction decode table: each entry pairs an opcode bit pattern
 * (bits/mask, supplied by the DEC_* macros) with its emitter.  decode()
 * scans entries in order; the final {0, 0} mask matches every opcode
 * and routes anything unrecognized to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1615
/* Decode and emit one instruction word: extract the common fields into
 * dc, handle the all-zero "nop" word specially (it may trap as an
 * illegal opcode, and long runs of it abort translation), then dispatch
 * through the decinfo table.
 */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Zero words may be configured to raise an illegal-opcode
         * exception via the PVR.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        /* A long run of zero words almost certainly means we are
         * executing garbage; give up rather than loop forever.  */
        if (dc->nr_nops > 4)
            cpu_abort(dc->env, "fetching nop sequence\n");
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    /* Common field layout shared by all insn formats.  */
    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns. */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1660
1661 static void check_breakpoint(CPUMBState *env, DisasContext *dc)
1662 {
1663 CPUState *cs = CPU(mb_env_get_cpu(env));
1664 CPUBreakpoint *bp;
1665
1666 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1667 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
1668 if (bp->pc == dc->pc) {
1669 t_gen_raise_exception(dc, EXCP_DEBUG);
1670 dc->is_jmp = DISAS_UPDATE;
1671 }
1672 }
1673 }
1674 }
1675
/* generate intermediate code for basic block 'tb'.
 *
 * Translates guest insns starting at tb->pc until the TB must end (jump,
 * cpu-state change, page boundary, op-buffer full, singlestep, or insn
 * budget).  With search_pc set, also records the pc/icount tables used
 * to map host ops back to guest PCs for exception restart.
 */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    /* Remember the entry flags so we can detect per-TB state changes.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* If the TB starts inside a delay slot, resume the pending branch.  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            /* Record the op-index -> guest-pc mapping for this insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        /* The IMM prefix only survives into the very next insn.  */
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* The delay slot has executed; perform any pending MSR
                 * restore and then take the branch.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                 if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
         && tcg_ctx.gen_opc_ptr < gen_opc_end
                 && !singlestep
         && (dc->pc < next_page_start)
                 && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended inside a delay slot: the branch has not executed
             * yet, so force a state sync instead of chaining.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad the remainder of the instr_start table.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
                tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
            dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
            tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1881
/* Public entry point: translate a TB without pc-search tables.  */
void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}
1886
/* Public entry point: translate a TB recording pc-search tables (used
 * when restoring guest state after an exception mid-TB).  */
void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}
1891
1892 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1893 int flags)
1894 {
1895 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1896 CPUMBState *env = &cpu->env;
1897 int i;
1898
1899 if (!env || !f)
1900 return;
1901
1902 cpu_fprintf(f, "IN: PC=%x %s\n",
1903 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1904 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1905 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1906 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1907 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1908 env->btaken, env->btarget,
1909 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1910 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1911 (env->sregs[SR_MSR] & MSR_EIP),
1912 (env->sregs[SR_MSR] & MSR_IE));
1913
1914 for (i = 0; i < 32; i++) {
1915 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1916 if ((i + 1) % 4 == 0)
1917 cpu_fprintf(f, "\n");
1918 }
1919 cpu_fprintf(f, "\n\n");
1920 }
1921
/* Create and realize a MicroBlaze CPU object.
 *
 * cpu_model is currently unused; the caller owns the returned CPU.
 */
MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    /* Realizing the object runs the QOM realize hook (reset, etc.).  */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
1932
/* One-time TCG setup: register the global TCG values that mirror
 * CPUMBState fields (debug, iflags, imm, branch state, reservation
 * address/value, the 32 GPRs and the special registers).
 */
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    /* General purpose registers r0..r31.  */
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    /* Special registers (PC, MSR, ESR, ...).  */
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
1971
/* Restore the guest PC from the pc-search table built by
 * gen_intermediate_code_pc() when an exception interrupts a TB.  */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}