target/microblaze: Fix width of PC and BTARGET
[qemu.git] / target / microblaze / translate.c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36
37 #define SIM_COMPAT 0
38 #define DISAS_GNU 1
39 #define DISAS_MB 1
40 #if DISAS_MB && !SIM_COMPAT
41 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 # define LOG_DIS(...) do { } while (0)
44 #endif
45
46 #define D(x)
47
48 #define EXTRACT_FIELD(src, start, end) \
49 (((src) >> start) & ((1 << (end - start + 1)) - 1))
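/* For example, EXTRACT_FIELD(ir, 26, 31) yields bits [31:26] of ir, the
   6-bit major opcode used by decode() below. */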
50
51 /* is_jmp field values */
52 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
56 static TCGv_i32 env_debug;
57 static TCGv_i32 cpu_R[32];
58 static TCGv_i32 cpu_pc;
59 static TCGv_i64 cpu_msr;
60 static TCGv_i64 cpu_ear;
61 static TCGv_i64 cpu_esr;
62 static TCGv_i64 cpu_fsr;
63 static TCGv_i64 cpu_btr;
64 static TCGv_i64 cpu_edr;
65 static TCGv_i32 env_imm;
66 static TCGv_i32 env_btaken;
67 static TCGv_i32 cpu_btarget;
68 static TCGv_i32 env_iflags;
69 static TCGv env_res_addr;
70 static TCGv_i32 env_res_val;
71
72 #include "exec/gen-icount.h"
73
74 /* This is the state at translation time. */
75 typedef struct DisasContext {
76 MicroBlazeCPU *cpu;
77 uint32_t pc;
78
79 /* Decoder. */
80 int type_b;
81 uint32_t ir;
82 uint8_t opcode;
83 uint8_t rd, ra, rb;
84 uint16_t imm;
85
86 unsigned int cpustate_changed;
87 unsigned int delayed_branch;
88 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
89 unsigned int clear_imm;
90 int is_jmp;
91
92 #define JMP_NOJMP 0
93 #define JMP_DIRECT 1
94 #define JMP_DIRECT_CC 2
95 #define JMP_INDIRECT 3
96 unsigned int jmp;
97 uint32_t jmp_pc;
98
99 int abort_at_next_insn;
100 struct TranslationBlock *tb;
101 int singlestep_enabled;
102 } DisasContext;
103
104 static const char *regnames[] =
105 {
106 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
107 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
108 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
109 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
110 };
111
112 static inline void t_sync_flags(DisasContext *dc)
113 {
114     /* Sync the TB-dependent flags between translator and runtime. */
115 if (dc->tb_flags != dc->synced_flags) {
116 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
117 dc->synced_flags = dc->tb_flags;
118 }
119 }
120
121 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
122 {
123 TCGv_i32 tmp = tcg_const_i32(index);
124
125 t_sync_flags(dc);
126 tcg_gen_movi_i32(cpu_pc, dc->pc);
127 gen_helper_raise_exception(cpu_env, tmp);
128 tcg_temp_free_i32(tmp);
129 dc->is_jmp = DISAS_UPDATE;
130 }
131
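/*
 * Direct TB chaining is only used when the destination lies on the same
 * guest page as this TB (always, for user-only builds); otherwise
 * gen_goto_tb simply updates PC and exits to the main loop.
 */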
132 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
133 {
134 #ifndef CONFIG_USER_ONLY
135 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
136 #else
137 return true;
138 #endif
139 }
140
141 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
142 {
143 if (use_goto_tb(dc, dest)) {
144 tcg_gen_goto_tb(n);
145 tcg_gen_movi_i32(cpu_pc, dest);
146 tcg_gen_exit_tb(dc->tb, n);
147 } else {
148 tcg_gen_movi_i32(cpu_pc, dest);
149 tcg_gen_exit_tb(NULL, 0);
150 }
151 }
152
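/* read_carry extracts the carry flag into d: MSR bit 31, the MSR_CC carry
   copy that write_carry below keeps equal to MSR_C (bit 2). */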
153 static void read_carry(DisasContext *dc, TCGv_i32 d)
154 {
155 tcg_gen_extrl_i64_i32(d, cpu_msr);
156 tcg_gen_shri_i32(d, d, 31);
157 }
158
159 /*
160 * write_carry sets the carry bits in MSR based on bit 0 of v.
161 * v[31:1] are ignored.
162 */
163 static void write_carry(DisasContext *dc, TCGv_i32 v)
164 {
165 TCGv_i64 t0 = tcg_temp_new_i64();
166 tcg_gen_extu_i32_i64(t0, v);
167 /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
168 tcg_gen_deposit_i64(cpu_msr, cpu_msr, t0, 2, 1);
169 tcg_gen_deposit_i64(cpu_msr, cpu_msr, t0, 31, 1);
170 tcg_temp_free_i64(t0);
171 }
172
173 static void write_carryi(DisasContext *dc, bool carry)
174 {
175 TCGv_i32 t0 = tcg_temp_new_i32();
176 tcg_gen_movi_i32(t0, carry);
177 write_carry(dc, t0);
178 tcg_temp_free_i32(t0);
179 }
180
181 /*
182  * Returns true if the insn is an illegal operation.
183 * If exceptions are enabled, an exception is raised.
184 */
185 static bool trap_illegal(DisasContext *dc, bool cond)
186 {
187 if (cond && (dc->tb_flags & MSR_EE_FLAG)
188 && dc->cpu->cfg.illegal_opcode_exception) {
189 tcg_gen_movi_i64(cpu_esr, ESR_EC_ILLEGAL_OP);
190 t_gen_raise_exception(dc, EXCP_HW_EXCP);
191 }
192 return cond;
193 }
194
195 /*
196 * Returns true if the insn is illegal in userspace.
197 * If exceptions are enabled, an exception is raised.
198 */
199 static bool trap_userspace(DisasContext *dc, bool cond)
200 {
201 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
202 bool cond_user = cond && mem_index == MMU_USER_IDX;
203
204 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
205 tcg_gen_movi_i64(cpu_esr, ESR_EC_PRIVINSN);
206 t_gen_raise_exception(dc, EXCP_HW_EXCP);
207 }
208 return cond_user;
209 }
210
211 /* True if ALU operand b is a small immediate that may deserve
212 faster treatment. */
213 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
214 {
215 /* Immediate insn without the imm prefix ? */
216 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
217 }
218
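/* Return a pointer to ALU operand B: for type-B insns the 16-bit immediate
   is materialized into env_imm (OR-ed into a pending imm prefix when
   IMM_FLAG is set, sign-extended otherwise); for type-A insns it is rb. */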
219 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
220 {
221 if (dc->type_b) {
222 if (dc->tb_flags & IMM_FLAG)
223 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
224 else
225 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
226 return &env_imm;
227 } else
228 return &cpu_R[dc->rb];
229 }
230
231 static void dec_add(DisasContext *dc)
232 {
233 unsigned int k, c;
234 TCGv_i32 cf;
235
236 k = dc->opcode & 4;
237 c = dc->opcode & 2;
238
239 LOG_DIS("add%s%s%s r%d r%d r%d\n",
240 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
241 dc->rd, dc->ra, dc->rb);
242
243 /* Take care of the easy cases first. */
244 if (k) {
245 /* k - keep carry, no need to update MSR. */
246 /* If rd == r0, it's a nop. */
247 if (dc->rd) {
248 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
249
250 if (c) {
251 /* c - Add carry into the result. */
252 cf = tcg_temp_new_i32();
253
254 read_carry(dc, cf);
255 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
256 tcg_temp_free_i32(cf);
257 }
258 }
259 return;
260 }
261
262 /* From now on, we can assume k is zero. So we need to update MSR. */
263 /* Extract carry. */
264 cf = tcg_temp_new_i32();
265 if (c) {
266 read_carry(dc, cf);
267 } else {
268 tcg_gen_movi_i32(cf, 0);
269 }
270
271 if (dc->rd) {
272 TCGv_i32 ncf = tcg_temp_new_i32();
273 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
274 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
275 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
276 write_carry(dc, ncf);
277 tcg_temp_free_i32(ncf);
278 } else {
279 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
280 write_carry(dc, cf);
281 }
282 tcg_temp_free_i32(cf);
283 }
284
285 static void dec_sub(DisasContext *dc)
286 {
287 unsigned int u, cmp, k, c;
288 TCGv_i32 cf, na;
289
290 u = dc->imm & 2;
291 k = dc->opcode & 4;
292 c = dc->opcode & 2;
293 cmp = (dc->imm & 1) && (!dc->type_b) && k;
294
295 if (cmp) {
296 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
297 if (dc->rd) {
298 if (u)
299 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
300 else
301 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
302 }
303 return;
304 }
305
306 LOG_DIS("sub%s%s r%d, r%d r%d\n",
307 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
308
309 /* Take care of the easy cases first. */
310 if (k) {
311 /* k - keep carry, no need to update MSR. */
312 /* If rd == r0, it's a nop. */
313 if (dc->rd) {
314 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
315
316 if (c) {
317 /* c - Add carry into the result. */
318 cf = tcg_temp_new_i32();
319
320 read_carry(dc, cf);
321 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
322 tcg_temp_free_i32(cf);
323 }
324 }
325 return;
326 }
327
328 /* From now on, we can assume k is zero. So we need to update MSR. */
329 /* Extract carry. And complement a into na. */
330 cf = tcg_temp_new_i32();
331 na = tcg_temp_new_i32();
332 if (c) {
333 read_carry(dc, cf);
334 } else {
335 tcg_gen_movi_i32(cf, 1);
336 }
337
338 /* d = b + ~a + c. carry defaults to 1. */
339 tcg_gen_not_i32(na, cpu_R[dc->ra]);
340
341 if (dc->rd) {
342 TCGv_i32 ncf = tcg_temp_new_i32();
343 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
344 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
345 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
346 write_carry(dc, ncf);
347 tcg_temp_free_i32(ncf);
348 } else {
349 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
350 write_carry(dc, cf);
351 }
352 tcg_temp_free_i32(cf);
353 tcg_temp_free_i32(na);
354 }
355
356 static void dec_pattern(DisasContext *dc)
357 {
358 unsigned int mode;
359
360 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
361 return;
362 }
363
364 mode = dc->opcode & 3;
365 switch (mode) {
366 case 0:
367 /* pcmpbf. */
368 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
369 if (dc->rd)
370 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
371 break;
372 case 2:
373 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
374 if (dc->rd) {
375 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
376 cpu_R[dc->ra], cpu_R[dc->rb]);
377 }
378 break;
379 case 3:
380 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
381 if (dc->rd) {
382 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
383 cpu_R[dc->ra], cpu_R[dc->rb]);
384 }
385 break;
386 default:
387 cpu_abort(CPU(dc->cpu),
388 "unsupported pattern insn opcode=%x\n", dc->opcode);
389 break;
390 }
391 }
392
393 static void dec_and(DisasContext *dc)
394 {
395 unsigned int not;
396
397 if (!dc->type_b && (dc->imm & (1 << 10))) {
398 dec_pattern(dc);
399 return;
400 }
401
402 not = dc->opcode & (1 << 1);
403 LOG_DIS("and%s\n", not ? "n" : "");
404
405 if (!dc->rd)
406 return;
407
408 if (not) {
409 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410 } else
411 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
412 }
413
414 static void dec_or(DisasContext *dc)
415 {
416 if (!dc->type_b && (dc->imm & (1 << 10))) {
417 dec_pattern(dc);
418 return;
419 }
420
421 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
422 if (dc->rd)
423 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
424 }
425
426 static void dec_xor(DisasContext *dc)
427 {
428 if (!dc->type_b && (dc->imm & (1 << 10))) {
429 dec_pattern(dc);
430 return;
431 }
432
433 LOG_DIS("xor r%d\n", dc->rd);
434 if (dc->rd)
435 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
436 }
437
438 static inline void msr_read(DisasContext *dc, TCGv_i32 d)
439 {
440 tcg_gen_extrl_i64_i32(d, cpu_msr);
441 }
442
443 static inline void msr_write(DisasContext *dc, TCGv_i32 v)
444 {
445 TCGv_i64 t;
446
447 t = tcg_temp_new_i64();
448 dc->cpustate_changed = 1;
449 /* PVR bit is not writable. */
450 tcg_gen_extu_i32_i64(t, v);
451 tcg_gen_andi_i64(t, t, ~MSR_PVR);
452 tcg_gen_andi_i64(cpu_msr, cpu_msr, MSR_PVR);
453 tcg_gen_or_i64(cpu_msr, cpu_msr, t);
454 tcg_temp_free_i64(t);
455 }
456
457 static void dec_msr(DisasContext *dc)
458 {
459 CPUState *cs = CPU(dc->cpu);
460 TCGv_i32 t0, t1;
461 unsigned int sr, rn;
462 bool to, clrset, extended = false;
463
464 sr = extract32(dc->imm, 0, 14);
465 to = extract32(dc->imm, 14, 1);
466 clrset = extract32(dc->imm, 15, 1) == 0;
467 dc->type_b = 1;
468 if (to) {
469 dc->cpustate_changed = 1;
470 }
471
472 /* Extended MSRs are only available if addr_size > 32. */
473 if (dc->cpu->cfg.addr_size > 32) {
474 /* The E-bit is encoded differently for To/From MSR. */
475 static const unsigned int e_bit[] = { 19, 24 };
476
477 extended = extract32(dc->imm, e_bit[to], 1);
478 }
479
480 /* msrclr and msrset. */
481 if (clrset) {
482 bool clr = extract32(dc->ir, 16, 1);
483
484 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
485 dc->rd, dc->imm);
486
487 if (!dc->cpu->cfg.use_msr_instr) {
488 /* nop??? */
489 return;
490 }
491
492 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
493 return;
494 }
495
496 if (dc->rd)
497 msr_read(dc, cpu_R[dc->rd]);
498
499 t0 = tcg_temp_new_i32();
500 t1 = tcg_temp_new_i32();
501 msr_read(dc, t0);
502 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
503
504 if (clr) {
505 tcg_gen_not_i32(t1, t1);
506 tcg_gen_and_i32(t0, t0, t1);
507 } else
508 tcg_gen_or_i32(t0, t0, t1);
509 msr_write(dc, t0);
510 tcg_temp_free_i32(t0);
511 tcg_temp_free_i32(t1);
512 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
513 dc->is_jmp = DISAS_UPDATE;
514 return;
515 }
516
517 if (trap_userspace(dc, to)) {
518 return;
519 }
520
521 #if !defined(CONFIG_USER_ONLY)
522 /* Catch read/writes to the mmu block. */
523 if ((sr & ~0xff) == 0x1000) {
524 TCGv_i32 tmp_ext = tcg_const_i32(extended);
525 TCGv_i32 tmp_sr;
526
527 sr &= 7;
528 tmp_sr = tcg_const_i32(sr);
529 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
530 if (to) {
531 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
532 } else {
533 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
534 }
535 tcg_temp_free_i32(tmp_sr);
536 tcg_temp_free_i32(tmp_ext);
537 return;
538 }
539 #endif
540
541 if (to) {
542 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
543 switch (sr) {
544 case SR_PC:
545 break;
546 case SR_MSR:
547 msr_write(dc, cpu_R[dc->ra]);
548 break;
549 case SR_EAR:
550 tcg_gen_extu_i32_i64(cpu_ear, cpu_R[dc->ra]);
551 break;
552 case SR_ESR:
553 tcg_gen_extu_i32_i64(cpu_esr, cpu_R[dc->ra]);
554 break;
555 case SR_FSR:
556 tcg_gen_extu_i32_i64(cpu_fsr, cpu_R[dc->ra]);
557 break;
558 case SR_BTR:
559 tcg_gen_extu_i32_i64(cpu_btr, cpu_R[dc->ra]);
560 break;
561 case SR_EDR:
562 tcg_gen_extu_i32_i64(cpu_edr, cpu_R[dc->ra]);
563 break;
564 case 0x800:
565 tcg_gen_st_i32(cpu_R[dc->ra],
566 cpu_env, offsetof(CPUMBState, slr));
567 break;
568 case 0x802:
569 tcg_gen_st_i32(cpu_R[dc->ra],
570 cpu_env, offsetof(CPUMBState, shr));
571 break;
572 default:
573 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
574 break;
575 }
576 } else {
577 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
578
579 switch (sr) {
580 case SR_PC:
581 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
582 break;
583 case SR_MSR:
584 msr_read(dc, cpu_R[dc->rd]);
585 break;
586 case SR_EAR:
587 if (extended) {
588 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_ear);
589 } else {
590 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_ear);
591 }
592 break;
593 case SR_ESR:
594 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_esr);
595 break;
596 case SR_FSR:
597 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_fsr);
598 break;
599 case SR_BTR:
600 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_btr);
601 break;
602 case SR_EDR:
603 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_edr);
604 break;
605 case 0x800:
606 tcg_gen_ld_i32(cpu_R[dc->rd],
607 cpu_env, offsetof(CPUMBState, slr));
608 break;
609 case 0x802:
610 tcg_gen_ld_i32(cpu_R[dc->rd],
611 cpu_env, offsetof(CPUMBState, shr));
612 break;
613 case 0x2000 ... 0x200c:
614 rn = sr & 0xf;
615 tcg_gen_ld_i32(cpu_R[dc->rd],
616 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
617 break;
618 default:
619 cpu_abort(cs, "unknown mfs reg %x\n", sr);
620 break;
621 }
622 }
623
624 if (dc->rd == 0) {
625 tcg_gen_movi_i32(cpu_R[0], 0);
626 }
627 }
628
629 /* Multiplier unit. */
630 static void dec_mul(DisasContext *dc)
631 {
632 TCGv_i32 tmp;
633 unsigned int subcode;
634
635 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
636 return;
637 }
638
639 subcode = dc->imm & 3;
640
641 if (dc->type_b) {
642 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
643 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
644 return;
645 }
646
647 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
648 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
649 /* nop??? */
650 }
651
652 tmp = tcg_temp_new_i32();
653 switch (subcode) {
654 case 0:
655 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
656 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
657 break;
658 case 1:
659 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
660 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
661 cpu_R[dc->ra], cpu_R[dc->rb]);
662 break;
663 case 2:
664 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
665 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
666 cpu_R[dc->ra], cpu_R[dc->rb]);
667 break;
668 case 3:
669 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
670 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
671 break;
672 default:
673 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
674 break;
675 }
676 tcg_temp_free_i32(tmp);
677 }
678
679 /* Div unit. */
680 static void dec_div(DisasContext *dc)
681 {
682 unsigned int u;
683
684 u = dc->imm & 2;
685 LOG_DIS("div\n");
686
687 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
688 return;
689 }
690
691 if (u)
692 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
693 cpu_R[dc->ra]);
694 else
695 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
696 cpu_R[dc->ra]);
697 if (!dc->rd)
698 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
699 }
700
701 static void dec_barrel(DisasContext *dc)
702 {
703 TCGv_i32 t0;
704 unsigned int imm_w, imm_s;
705 bool s, t, e = false, i = false;
706
707 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
708 return;
709 }
710
711 if (dc->type_b) {
712 /* Insert and extract are only available in immediate mode. */
713 i = extract32(dc->imm, 15, 1);
714 e = extract32(dc->imm, 14, 1);
715 }
716 s = extract32(dc->imm, 10, 1);
717 t = extract32(dc->imm, 9, 1);
718 imm_w = extract32(dc->imm, 6, 5);
719 imm_s = extract32(dc->imm, 0, 5);
720
721 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
722 e ? "e" : "",
723 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
724
725 if (e) {
726 if (imm_w + imm_s > 32 || imm_w == 0) {
727             /* These inputs have undefined behavior. */
728 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
729 imm_w, imm_s);
730 } else {
731 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
732 }
733 } else if (i) {
734 int width = imm_w - imm_s + 1;
735
736 if (imm_w < imm_s) {
737             /* These inputs have undefined behavior. */
738 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
739 imm_w, imm_s);
740 } else {
741 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
742 imm_s, width);
743 }
744 } else {
745 t0 = tcg_temp_new_i32();
746
747 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
748 tcg_gen_andi_i32(t0, t0, 31);
749
750 if (s) {
751 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
752 } else {
753 if (t) {
754 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
755 } else {
756 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
757 }
758 }
759 tcg_temp_free_i32(t0);
760 }
761 }
762
763 static void dec_bit(DisasContext *dc)
764 {
765 CPUState *cs = CPU(dc->cpu);
766 TCGv_i32 t0;
767 unsigned int op;
768
769 op = dc->ir & ((1 << 9) - 1);
770 switch (op) {
771 case 0x21:
772 /* src. */
773 t0 = tcg_temp_new_i32();
774
775 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
776 tcg_gen_extrl_i64_i32(t0, cpu_msr);
777 tcg_gen_andi_i32(t0, t0, MSR_CC);
778 write_carry(dc, cpu_R[dc->ra]);
779 if (dc->rd) {
780 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
781 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
782 }
783 tcg_temp_free_i32(t0);
784 break;
785
786 case 0x1:
787 case 0x41:
788 /* srl. */
789 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
790
791 /* Update carry. Note that write carry only looks at the LSB. */
792 write_carry(dc, cpu_R[dc->ra]);
793 if (dc->rd) {
794 if (op == 0x41)
795 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
796 else
797 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
798 }
799 break;
800 case 0x60:
801 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
802 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
803 break;
804 case 0x61:
805 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
806 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
807 break;
808 case 0x64:
809 case 0x66:
810 case 0x74:
811 case 0x76:
812 /* wdc. */
813 LOG_DIS("wdc r%d\n", dc->ra);
814 trap_userspace(dc, true);
815 break;
816 case 0x68:
817 /* wic. */
818 LOG_DIS("wic r%d\n", dc->ra);
819 trap_userspace(dc, true);
820 break;
821 case 0xe0:
822 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
823 return;
824 }
825 if (dc->cpu->cfg.use_pcmp_instr) {
826 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
827 }
828 break;
829 case 0x1e0:
830 /* swapb */
831 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
832 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
833 break;
834 case 0x1e2:
835         /* swaph */
836 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
837 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
838 break;
839 default:
840         cpu_abort(cs, "unknown bit pc=%x op=%x rd=%d ra=%d rb=%d\n",
841 dc->pc, op, dc->rd, dc->ra, dc->rb);
842 break;
843 }
844 }
845
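/* Commit translate-time branch state (JMP_DIRECT/JMP_DIRECT_CC) to
   env_btaken and cpu_btarget so that an exception taken in the delay slot
   sees consistent state; the branch is then handled as JMP_INDIRECT. */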
846 static inline void sync_jmpstate(DisasContext *dc)
847 {
848 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
849 if (dc->jmp == JMP_DIRECT) {
850 tcg_gen_movi_i32(env_btaken, 1);
851 }
852 dc->jmp = JMP_INDIRECT;
853 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
854 }
855 }
856
857 static void dec_imm(DisasContext *dc)
858 {
859 LOG_DIS("imm %x\n", dc->imm << 16);
860 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
861 dc->tb_flags |= IMM_FLAG;
862 dc->clear_imm = 0;
863 }
864
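/* Compute the load/store address into t.  With extended addressing (ea),
   the 64-bit address is ra:rb (ra in the high half), masked down to
   cfg.addr_size bits when that is smaller than 64. */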
865 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
866 {
867 bool extimm = dc->tb_flags & IMM_FLAG;
868 /* Should be set to true if r1 is used by loadstores. */
869 bool stackprot = false;
870 TCGv_i32 t32;
871
872 /* All load/stores use ra. */
873 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
874 stackprot = true;
875 }
876
877 /* Treat the common cases first. */
878 if (!dc->type_b) {
879 if (ea) {
880 int addr_size = dc->cpu->cfg.addr_size;
881
882 if (addr_size == 32) {
883 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
884 return;
885 }
886
887 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
888 if (addr_size < 64) {
889 /* Mask off out of range bits. */
890 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
891 }
892 return;
893 }
894
895 /* If any of the regs is r0, set t to the value of the other reg. */
896 if (dc->ra == 0) {
897 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
898 return;
899 } else if (dc->rb == 0) {
900 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
901 return;
902 }
903
904 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
905 stackprot = true;
906 }
907
908 t32 = tcg_temp_new_i32();
909 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
910 tcg_gen_extu_i32_tl(t, t32);
911 tcg_temp_free_i32(t32);
912
913 if (stackprot) {
914 gen_helper_stackprot(cpu_env, t);
915 }
916 return;
917 }
918 /* Immediate. */
919 t32 = tcg_temp_new_i32();
920 if (!extimm) {
921 tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
922 } else {
923 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
924 }
925 tcg_gen_extu_i32_tl(t, t32);
926 tcg_temp_free_i32(t32);
927
928 if (stackprot) {
929 gen_helper_stackprot(cpu_env, t);
930 }
931 return;
932 }
933
934 static void dec_load(DisasContext *dc)
935 {
936 TCGv_i32 v;
937 TCGv addr;
938 unsigned int size;
939 bool rev = false, ex = false, ea = false;
940 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
941 MemOp mop;
942
943 mop = dc->opcode & 3;
944 size = 1 << mop;
945 if (!dc->type_b) {
946 ea = extract32(dc->ir, 7, 1);
947 rev = extract32(dc->ir, 9, 1);
948 ex = extract32(dc->ir, 10, 1);
949 }
950 mop |= MO_TE;
951 if (rev) {
952 mop ^= MO_BSWAP;
953 }
954
955 if (trap_illegal(dc, size > 4)) {
956 return;
957 }
958
959 if (trap_userspace(dc, ea)) {
960 return;
961 }
962
963 LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
964 ex ? "x" : "",
965 ea ? "ea" : "");
966
967 t_sync_flags(dc);
968 addr = tcg_temp_new();
969 compute_ldst_addr(dc, ea, addr);
970 /* Extended addressing bypasses the MMU. */
971 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
972
973 /*
974 * When doing reverse accesses we need to do two things.
975 *
976 * 1. Reverse the address wrt endianness.
977 * 2. Byteswap the data lanes on the way back into the CPU core.
978 */
979 if (rev && size != 4) {
980         /* Endian reverse the address. */
981 switch (size) {
982 case 1:
983 {
984 tcg_gen_xori_tl(addr, addr, 3);
985 break;
986 }
987
988 case 2:
989 /* 00 -> 10
990 10 -> 00. */
991 tcg_gen_xori_tl(addr, addr, 2);
992 break;
993 default:
994 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
995 break;
996 }
997 }
998
999 /* lwx does not throw unaligned access errors, so force alignment */
1000 if (ex) {
1001 tcg_gen_andi_tl(addr, addr, ~3);
1002 }
1003
1004 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1005 sync_jmpstate(dc);
1006
1007 /* Verify alignment if needed. */
1008 /*
1009 * Microblaze gives MMU faults priority over faults due to
1010 * unaligned addresses. That's why we speculatively do the load
1011 * into v. If the load succeeds, we verify alignment of the
1012 * address and if that succeeds we write into the destination reg.
1013 */
1014 v = tcg_temp_new_i32();
1015 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
1016
1017 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1018 TCGv_i32 t0 = tcg_const_i32(0);
1019 TCGv_i32 treg = tcg_const_i32(dc->rd);
1020 TCGv_i32 tsize = tcg_const_i32(size - 1);
1021
1022 tcg_gen_movi_i32(cpu_pc, dc->pc);
1023 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1024
1025 tcg_temp_free_i32(t0);
1026 tcg_temp_free_i32(treg);
1027 tcg_temp_free_i32(tsize);
1028 }
1029
1030 if (ex) {
1031 tcg_gen_mov_tl(env_res_addr, addr);
1032 tcg_gen_mov_i32(env_res_val, v);
1033 }
1034 if (dc->rd) {
1035 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1036 }
1037 tcg_temp_free_i32(v);
1038
1039 if (ex) { /* lwx */
1040 /* no support for AXI exclusive so always clear C */
1041 write_carryi(dc, 0);
1042 }
1043
1044 tcg_temp_free(addr);
1045 }
1046
1047 static void dec_store(DisasContext *dc)
1048 {
1049 TCGv addr;
1050 TCGLabel *swx_skip = NULL;
1051 unsigned int size;
1052 bool rev = false, ex = false, ea = false;
1053 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1054 MemOp mop;
1055
1056 mop = dc->opcode & 3;
1057 size = 1 << mop;
1058 if (!dc->type_b) {
1059 ea = extract32(dc->ir, 7, 1);
1060 rev = extract32(dc->ir, 9, 1);
1061 ex = extract32(dc->ir, 10, 1);
1062 }
1063 mop |= MO_TE;
1064 if (rev) {
1065 mop ^= MO_BSWAP;
1066 }
1067
1068 if (trap_illegal(dc, size > 4)) {
1069 return;
1070 }
1071
1072 trap_userspace(dc, ea);
1073
1074 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1075 ex ? "x" : "",
1076 ea ? "ea" : "");
1077 t_sync_flags(dc);
1078 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1079 sync_jmpstate(dc);
1080 /* SWX needs a temp_local. */
1081 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1082 compute_ldst_addr(dc, ea, addr);
1083 /* Extended addressing bypasses the MMU. */
1084 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1085
1086 if (ex) { /* swx */
1087 TCGv_i32 tval;
1088
1089 /* swx does not throw unaligned access errors, so force alignment */
1090 tcg_gen_andi_tl(addr, addr, ~3);
1091
1092 write_carryi(dc, 1);
1093 swx_skip = gen_new_label();
1094 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1095
1096 /*
1097 * Compare the value loaded at lwx with current contents of
1098 * the reserved location.
1099 */
1100 tval = tcg_temp_new_i32();
1101
1102 tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
1103 cpu_R[dc->rd], mem_index,
1104 mop);
1105
1106 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1107 write_carryi(dc, 0);
1108 tcg_temp_free_i32(tval);
1109 }
1110
1111 if (rev && size != 4) {
1112         /* Endian reverse the address. */
1113 switch (size) {
1114 case 1:
1115 {
1116 tcg_gen_xori_tl(addr, addr, 3);
1117 break;
1118 }
1119
1120 case 2:
1121 /* 00 -> 10
1122 10 -> 00. */
1123 /* Force addr into the temp. */
1124 tcg_gen_xori_tl(addr, addr, 2);
1125 break;
1126 default:
1127 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1128 break;
1129 }
1130 }
1131
1132 if (!ex) {
1133 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1134 }
1135
1136 /* Verify alignment if needed. */
1137 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1138 TCGv_i32 t1 = tcg_const_i32(1);
1139 TCGv_i32 treg = tcg_const_i32(dc->rd);
1140 TCGv_i32 tsize = tcg_const_i32(size - 1);
1141
1142 tcg_gen_movi_i32(cpu_pc, dc->pc);
1143 /* FIXME: if the alignment is wrong, we should restore the value
1144 * in memory. One possible way to achieve this is to probe
1145          *        the MMU prior to the memaccess, that way we could put
1146 * the alignment checks in between the probe and the mem
1147 * access.
1148 */
1149 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1150
1151 tcg_temp_free_i32(t1);
1152 tcg_temp_free_i32(treg);
1153 tcg_temp_free_i32(tsize);
1154 }
1155
1156 if (ex) {
1157 gen_set_label(swx_skip);
1158 }
1159
1160 tcg_temp_free(addr);
1161 }
1162
1163 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1164 TCGv_i32 d, TCGv_i32 a)
1165 {
1166 static const int mb_to_tcg_cc[] = {
1167 [CC_EQ] = TCG_COND_EQ,
1168 [CC_NE] = TCG_COND_NE,
1169 [CC_LT] = TCG_COND_LT,
1170 [CC_LE] = TCG_COND_LE,
1171 [CC_GE] = TCG_COND_GE,
1172 [CC_GT] = TCG_COND_GT,
1173 };
1174
1175 switch (cc) {
1176 case CC_EQ:
1177 case CC_NE:
1178 case CC_LT:
1179 case CC_LE:
1180 case CC_GE:
1181 case CC_GT:
1182 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1183 break;
1184 default:
1185 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1186 break;
1187 }
1188 }
1189
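/* Resolve a pending branch: PC = env_btaken ? pc_true : pc_false. */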
1190 static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
1191 {
1192 TCGv_i32 zero = tcg_const_i32(0);
1193
1194 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
1195 env_btaken, zero,
1196 pc_true, pc_false);
1197
1198 tcg_temp_free_i32(zero);
1199 }
1200
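/* Mark the next insn as a delay slot: the branch completes after it
   (delayed_branch == 2), D_FLAG is set in the tb flags, and env->bimm
   records whether the branch itself was an imm-prefixed type-B insn. */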
1201 static void dec_setup_dslot(DisasContext *dc)
1202 {
1203 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1204
1205 dc->delayed_branch = 2;
1206 dc->tb_flags |= D_FLAG;
1207
1208 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1209 tcg_temp_free_i32(tmp);
1210 }
1211
1212 static void dec_bcc(DisasContext *dc)
1213 {
1214 unsigned int cc;
1215 unsigned int dslot;
1216
1217 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1218 dslot = dc->ir & (1 << 25);
1219 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1220
1221 dc->delayed_branch = 1;
1222 if (dslot) {
1223 dec_setup_dslot(dc);
1224 }
1225
1226 if (dec_alu_op_b_is_small_imm(dc)) {
1227 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1228
1229 tcg_gen_movi_i32(cpu_btarget, dc->pc + offset);
1230 dc->jmp = JMP_DIRECT_CC;
1231 dc->jmp_pc = dc->pc + offset;
1232 } else {
1233 dc->jmp = JMP_INDIRECT;
1234 tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
1235 }
1236 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
1237 }
1238
1239 static void dec_br(DisasContext *dc)
1240 {
1241 unsigned int dslot, link, abs, mbar;
1242
1243 dslot = dc->ir & (1 << 20);
1244 abs = dc->ir & (1 << 19);
1245 link = dc->ir & (1 << 18);
1246
1247 /* Memory barrier. */
1248 mbar = (dc->ir >> 16) & 31;
1249 if (mbar == 2 && dc->imm == 4) {
1250 uint16_t mbar_imm = dc->rd;
1251
1252 LOG_DIS("mbar %d\n", mbar_imm);
1253
1254 /* Data access memory barrier. */
1255 if ((mbar_imm & 2) == 0) {
1256 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1257 }
1258
1259 /* mbar IMM & 16 decodes to sleep. */
1260 if (mbar_imm & 16) {
1261 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1262 TCGv_i32 tmp_1 = tcg_const_i32(1);
1263
1264 LOG_DIS("sleep\n");
1265
1266 if (trap_userspace(dc, true)) {
1267 /* Sleep is a privileged instruction. */
1268 return;
1269 }
1270
1271 t_sync_flags(dc);
1272 tcg_gen_st_i32(tmp_1, cpu_env,
1273 -offsetof(MicroBlazeCPU, env)
1274 +offsetof(CPUState, halted));
1275 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
1276 gen_helper_raise_exception(cpu_env, tmp_hlt);
1277 tcg_temp_free_i32(tmp_hlt);
1278 tcg_temp_free_i32(tmp_1);
1279 return;
1280 }
1281 /* Break the TB. */
1282 dc->cpustate_changed = 1;
1283 return;
1284 }
1285
1286 LOG_DIS("br%s%s%s%s imm=%x\n",
1287 abs ? "a" : "", link ? "l" : "",
1288 dc->type_b ? "i" : "", dslot ? "d" : "",
1289 dc->imm);
1290
1291 dc->delayed_branch = 1;
1292 if (dslot) {
1293 dec_setup_dslot(dc);
1294 }
1295 if (link && dc->rd)
1296 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1297
1298 dc->jmp = JMP_INDIRECT;
1299 if (abs) {
1300 tcg_gen_movi_i32(env_btaken, 1);
1301 tcg_gen_mov_i32(cpu_btarget, *(dec_alu_op_b(dc)));
1302 if (link && !dslot) {
1303 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1304 t_gen_raise_exception(dc, EXCP_BREAK);
1305 if (dc->imm == 0) {
1306 if (trap_userspace(dc, true)) {
1307 return;
1308 }
1309
1310 t_gen_raise_exception(dc, EXCP_DEBUG);
1311 }
1312 }
1313 } else {
1314 if (dec_alu_op_b_is_small_imm(dc)) {
1315 dc->jmp = JMP_DIRECT;
1316 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1317 } else {
1318 tcg_gen_movi_i32(env_btaken, 1);
1319 tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
1320 }
1321 }
1322 }
1323
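/* do_rti/do_rtb/do_rte undo the exception-entry MSR shuffle: UM and VM are
   restored from the saved copies one bit above them (MSR_UMS/MSR_VMS);
   rtid also sets IE, rtbd clears BIP, and rted sets EE and clears EIP. */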
1324 static inline void do_rti(DisasContext *dc)
1325 {
1326 TCGv_i32 t0, t1;
1327 t0 = tcg_temp_new_i32();
1328 t1 = tcg_temp_new_i32();
1329 tcg_gen_extrl_i64_i32(t1, cpu_msr);
1330 tcg_gen_shri_i32(t0, t1, 1);
1331 tcg_gen_ori_i32(t1, t1, MSR_IE);
1332 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1333
1334 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1335 tcg_gen_or_i32(t1, t1, t0);
1336 msr_write(dc, t1);
1337 tcg_temp_free_i32(t1);
1338 tcg_temp_free_i32(t0);
1339 dc->tb_flags &= ~DRTI_FLAG;
1340 }
1341
1342 static inline void do_rtb(DisasContext *dc)
1343 {
1344 TCGv_i32 t0, t1;
1345 t0 = tcg_temp_new_i32();
1346 t1 = tcg_temp_new_i32();
1347 tcg_gen_extrl_i64_i32(t1, cpu_msr);
1348 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1349 tcg_gen_shri_i32(t0, t1, 1);
1350 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1351
1352 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1353 tcg_gen_or_i32(t1, t1, t0);
1354 msr_write(dc, t1);
1355 tcg_temp_free_i32(t1);
1356 tcg_temp_free_i32(t0);
1357 dc->tb_flags &= ~DRTB_FLAG;
1358 }
1359
1360 static inline void do_rte(DisasContext *dc)
1361 {
1362 TCGv_i32 t0, t1;
1363 t0 = tcg_temp_new_i32();
1364 t1 = tcg_temp_new_i32();
1365
1366 tcg_gen_extrl_i64_i32(t1, cpu_msr);
1367 tcg_gen_ori_i32(t1, t1, MSR_EE);
1368 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1369 tcg_gen_shri_i32(t0, t1, 1);
1370 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1371
1372 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1373 tcg_gen_or_i32(t1, t1, t0);
1374 msr_write(dc, t1);
1375 tcg_temp_free_i32(t1);
1376 tcg_temp_free_i32(t0);
1377 dc->tb_flags &= ~DRTE_FLAG;
1378 }
1379
1380 static void dec_rts(DisasContext *dc)
1381 {
1382 unsigned int b_bit, i_bit, e_bit;
1383
1384 i_bit = dc->ir & (1 << 21);
1385 b_bit = dc->ir & (1 << 22);
1386 e_bit = dc->ir & (1 << 23);
1387
1388 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1389 return;
1390 }
1391
1392 dec_setup_dslot(dc);
1393
1394 if (i_bit) {
1395 LOG_DIS("rtid ir=%x\n", dc->ir);
1396 dc->tb_flags |= DRTI_FLAG;
1397 } else if (b_bit) {
1398 LOG_DIS("rtbd ir=%x\n", dc->ir);
1399 dc->tb_flags |= DRTB_FLAG;
1400 } else if (e_bit) {
1401 LOG_DIS("rted ir=%x\n", dc->ir);
1402 dc->tb_flags |= DRTE_FLAG;
1403 } else
1404 LOG_DIS("rts ir=%x\n", dc->ir);
1405
1406 dc->jmp = JMP_INDIRECT;
1407 tcg_gen_movi_i32(env_btaken, 1);
1408 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
1409 }
1410
1411 static int dec_check_fpuv2(DisasContext *dc)
1412 {
1413 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1414 tcg_gen_movi_i64(cpu_esr, ESR_EC_FPU);
1415 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1416 }
1417 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1418 }
1419
1420 static void dec_fpu(DisasContext *dc)
1421 {
1422 unsigned int fpu_insn;
1423
1424 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1425 return;
1426 }
1427
1428 fpu_insn = (dc->ir >> 7) & 7;
1429
1430 switch (fpu_insn) {
1431 case 0:
1432 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1433 cpu_R[dc->rb]);
1434 break;
1435
1436 case 1:
1437 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1438 cpu_R[dc->rb]);
1439 break;
1440
1441 case 2:
1442 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1443 cpu_R[dc->rb]);
1444 break;
1445
1446 case 3:
1447 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1448 cpu_R[dc->rb]);
1449 break;
1450
1451 case 4:
1452 switch ((dc->ir >> 4) & 7) {
1453 case 0:
1454 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1455 cpu_R[dc->ra], cpu_R[dc->rb]);
1456 break;
1457 case 1:
1458 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1459 cpu_R[dc->ra], cpu_R[dc->rb]);
1460 break;
1461 case 2:
1462 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1463 cpu_R[dc->ra], cpu_R[dc->rb]);
1464 break;
1465 case 3:
1466 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1467 cpu_R[dc->ra], cpu_R[dc->rb]);
1468 break;
1469 case 4:
1470 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1471 cpu_R[dc->ra], cpu_R[dc->rb]);
1472 break;
1473 case 5:
1474 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1475 cpu_R[dc->ra], cpu_R[dc->rb]);
1476 break;
1477 case 6:
1478 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1479 cpu_R[dc->ra], cpu_R[dc->rb]);
1480 break;
1481 default:
1482 qemu_log_mask(LOG_UNIMP,
1483 "unimplemented fcmp fpu_insn=%x pc=%x"
1484 " opc=%x\n",
1485 fpu_insn, dc->pc, dc->opcode);
1486 dc->abort_at_next_insn = 1;
1487 break;
1488 }
1489 break;
1490
1491 case 5:
1492 if (!dec_check_fpuv2(dc)) {
1493 return;
1494 }
1495 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1496 break;
1497
1498 case 6:
1499 if (!dec_check_fpuv2(dc)) {
1500 return;
1501 }
1502 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1503 break;
1504
1505 case 7:
1506 if (!dec_check_fpuv2(dc)) {
1507 return;
1508 }
1509 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1510 break;
1511
1512 default:
1513 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1514 " opc=%x\n",
1515 fpu_insn, dc->pc, dc->opcode);
1516 dc->abort_at_next_insn = 1;
1517 break;
1518 }
1519 }
1520
1521 static void dec_null(DisasContext *dc)
1522 {
1523 if (trap_illegal(dc, true)) {
1524 return;
1525 }
1526 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1527 dc->abort_at_next_insn = 1;
1528 }
1529
1530 /* Insns connected to FSL or AXI stream attached devices. */
1531 static void dec_stream(DisasContext *dc)
1532 {
1533 TCGv_i32 t_id, t_ctrl;
1534 int ctrl;
1535
1536 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1537 dc->type_b ? "" : "d", dc->imm);
1538
1539 if (trap_userspace(dc, true)) {
1540 return;
1541 }
1542
1543 t_id = tcg_temp_new_i32();
1544 if (dc->type_b) {
1545 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1546 ctrl = dc->imm >> 10;
1547 } else {
1548 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1549 ctrl = dc->imm >> 5;
1550 }
1551
1552 t_ctrl = tcg_const_i32(ctrl);
1553
1554 if (dc->rd == 0) {
1555 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1556 } else {
1557 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1558 }
1559 tcg_temp_free_i32(t_id);
1560 tcg_temp_free_i32(t_ctrl);
1561 }
1562
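/* Major-opcode dispatch table; dec_null catches anything unmatched. */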
1563 static struct decoder_info {
1564 struct {
1565 uint32_t bits;
1566 uint32_t mask;
1567 };
1568 void (*dec)(DisasContext *dc);
1569 } decinfo[] = {
1570 {DEC_ADD, dec_add},
1571 {DEC_SUB, dec_sub},
1572 {DEC_AND, dec_and},
1573 {DEC_XOR, dec_xor},
1574 {DEC_OR, dec_or},
1575 {DEC_BIT, dec_bit},
1576 {DEC_BARREL, dec_barrel},
1577 {DEC_LD, dec_load},
1578 {DEC_ST, dec_store},
1579 {DEC_IMM, dec_imm},
1580 {DEC_BR, dec_br},
1581 {DEC_BCC, dec_bcc},
1582 {DEC_RTS, dec_rts},
1583 {DEC_FPU, dec_fpu},
1584 {DEC_MUL, dec_mul},
1585 {DEC_DIV, dec_div},
1586 {DEC_MSR, dec_msr},
1587 {DEC_STREAM, dec_stream},
1588 {{0, 0}, dec_null}
1589 };
1590
1591 static inline void decode(DisasContext *dc, uint32_t ir)
1592 {
1593 int i;
1594
1595 dc->ir = ir;
1596 LOG_DIS("%8.8x\t", dc->ir);
1597
1598 if (ir == 0) {
1599 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
1600 /* Don't decode nop/zero instructions any further. */
1601 return;
1602 }
1603
1604     /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) indicates a type-B insn. */
1605 dc->type_b = ir & (1 << 29);
1606
1607 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1608 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1609 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1610 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1611 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1612
1613     /* Dispatch to the matching decoder via the table above. */
1614 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1615 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1616 decinfo[i].dec(dc);
1617 break;
1618 }
1619 }
1620 }
1621
1622 /* generate intermediate code for basic block 'tb'. */
1623 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1624 {
1625 CPUMBState *env = cs->env_ptr;
1626 MicroBlazeCPU *cpu = env_archcpu(env);
1627 uint32_t pc_start;
1628 struct DisasContext ctx;
1629 struct DisasContext *dc = &ctx;
1630 uint32_t page_start, org_flags;
1631 uint32_t npc;
1632 int num_insns;
1633
1634 pc_start = tb->pc;
1635 dc->cpu = cpu;
1636 dc->tb = tb;
1637 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1638
1639 dc->is_jmp = DISAS_NEXT;
1640 dc->jmp = 0;
1641 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1642 if (dc->delayed_branch) {
1643 dc->jmp = JMP_INDIRECT;
1644 }
1645 dc->pc = pc_start;
1646 dc->singlestep_enabled = cs->singlestep_enabled;
1647 dc->cpustate_changed = 0;
1648 dc->abort_at_next_insn = 0;
1649
1650 if (pc_start & 3) {
1651 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1652 }
1653
1654 page_start = pc_start & TARGET_PAGE_MASK;
1655 num_insns = 0;
1656
1657 gen_tb_start(tb);
1658 do
1659 {
1660 tcg_gen_insn_start(dc->pc);
1661 num_insns++;
1662
1663 #if SIM_COMPAT
1664 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1665 tcg_gen_movi_i32(cpu_pc, dc->pc);
1666 gen_helper_debug();
1667 }
1668 #endif
1669
1670 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1671 t_gen_raise_exception(dc, EXCP_DEBUG);
1672 dc->is_jmp = DISAS_UPDATE;
1673 /* The address covered by the breakpoint must be included in
1674                [tb->pc, tb->pc + tb->size) in order for it to be
1675 properly cleared -- thus we increment the PC here so that
1676 the logic setting tb->size below does the right thing. */
1677 dc->pc += 4;
1678 break;
1679 }
1680
1681 /* Pretty disas. */
1682 LOG_DIS("%8.8x:\t", dc->pc);
1683
1684 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1685 gen_io_start();
1686 }
1687
1688 dc->clear_imm = 1;
1689 decode(dc, cpu_ldl_code(env, dc->pc));
1690 if (dc->clear_imm)
1691 dc->tb_flags &= ~IMM_FLAG;
1692 dc->pc += 4;
1693
1694 if (dc->delayed_branch) {
1695 dc->delayed_branch--;
1696 if (!dc->delayed_branch) {
1697 if (dc->tb_flags & DRTI_FLAG)
1698 do_rti(dc);
1699 if (dc->tb_flags & DRTB_FLAG)
1700 do_rtb(dc);
1701 if (dc->tb_flags & DRTE_FLAG)
1702 do_rte(dc);
1703 /* Clear the delay slot flag. */
1704 dc->tb_flags &= ~D_FLAG;
1705 /* If it is a direct jump, try direct chaining. */
1706 if (dc->jmp == JMP_INDIRECT) {
1707 TCGv_i32 tmp_pc = tcg_const_i32(dc->pc);
1708 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1709 tcg_temp_free_i32(tmp_pc);
1710 dc->is_jmp = DISAS_JUMP;
1711 } else if (dc->jmp == JMP_DIRECT) {
1712 t_sync_flags(dc);
1713 gen_goto_tb(dc, 0, dc->jmp_pc);
1714 dc->is_jmp = DISAS_TB_JUMP;
1715 } else if (dc->jmp == JMP_DIRECT_CC) {
1716 TCGLabel *l1 = gen_new_label();
1717 t_sync_flags(dc);
1718 /* Conditional jmp. */
1719 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
1720 gen_goto_tb(dc, 1, dc->pc);
1721 gen_set_label(l1);
1722 gen_goto_tb(dc, 0, dc->jmp_pc);
1723
1724 dc->is_jmp = DISAS_TB_JUMP;
1725 }
1726 break;
1727 }
1728 }
1729 if (cs->singlestep_enabled) {
1730 break;
1731 }
1732 } while (!dc->is_jmp && !dc->cpustate_changed
1733 && !tcg_op_buf_full()
1734 && !singlestep
1735 && (dc->pc - page_start < TARGET_PAGE_SIZE)
1736 && num_insns < max_insns);
1737
1738 npc = dc->pc;
1739 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1740 if (dc->tb_flags & D_FLAG) {
1741 dc->is_jmp = DISAS_UPDATE;
1742 tcg_gen_movi_i32(cpu_pc, npc);
1743 sync_jmpstate(dc);
1744 } else
1745 npc = dc->jmp_pc;
1746 }
1747
1748 /* Force an update if the per-tb cpu state has changed. */
1749 if (dc->is_jmp == DISAS_NEXT
1750 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1751 dc->is_jmp = DISAS_UPDATE;
1752 tcg_gen_movi_i32(cpu_pc, npc);
1753 }
1754 t_sync_flags(dc);
1755
1756 if (unlikely(cs->singlestep_enabled)) {
1757 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1758
1759 if (dc->is_jmp != DISAS_JUMP) {
1760 tcg_gen_movi_i32(cpu_pc, npc);
1761 }
1762 gen_helper_raise_exception(cpu_env, tmp);
1763 tcg_temp_free_i32(tmp);
1764 } else {
1765 switch(dc->is_jmp) {
1766 case DISAS_NEXT:
1767 gen_goto_tb(dc, 1, npc);
1768 break;
1769 default:
1770 case DISAS_JUMP:
1771 case DISAS_UPDATE:
1772 /* indicate that the hash table must be used
1773 to find the next TB */
1774 tcg_gen_exit_tb(NULL, 0);
1775 break;
1776 case DISAS_TB_JUMP:
1777 /* nothing more to generate */
1778 break;
1779 }
1780 }
1781 gen_tb_end(tb, num_insns);
1782
1783 tb->size = dc->pc - pc_start;
1784 tb->icount = num_insns;
1785
1786 #ifdef DEBUG_DISAS
1787 #if !SIM_COMPAT
1788 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1789 && qemu_log_in_addr_range(pc_start)) {
1790 FILE *logfile = qemu_log_lock();
1791 qemu_log("--------------\n");
1792 log_target_disas(cs, pc_start, dc->pc - pc_start);
1793 qemu_log_unlock(logfile);
1794 }
1795 #endif
1796 #endif
1797 assert(!dc->abort_at_next_insn);
1798 }
1799
1800 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1801 {
1802 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1803 CPUMBState *env = &cpu->env;
1804 int i;
1805
1806 if (!env) {
1807 return;
1808 }
1809
1810 qemu_fprintf(f, "IN: PC=%x %s\n",
1811 env->pc, lookup_symbol(env->pc));
1812 qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
1813 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 " "
1814 "rbtr=%" PRIx64 "\n",
1815 env->msr, env->esr, env->ear,
1816 env->debug, env->imm, env->iflags, env->fsr,
1817 env->btr);
1818 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1819 env->btaken, env->btarget,
1820 (env->msr & MSR_UM) ? "user" : "kernel",
1821 (env->msr & MSR_UMS) ? "user" : "kernel",
1822 (bool)(env->msr & MSR_EIP),
1823 (bool)(env->msr & MSR_IE));
1824 for (i = 0; i < 12; i++) {
1825 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1826 if ((i + 1) % 4 == 0) {
1827 qemu_fprintf(f, "\n");
1828 }
1829 }
1830
1831 /* Registers that aren't modeled are reported as 0 */
1832 qemu_fprintf(f, "redr=%" PRIx64 " rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1833 "rtlblo=0 rtlbhi=0\n", env->edr);
1834 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1835 for (i = 0; i < 32; i++) {
1836 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1837 if ((i + 1) % 4 == 0)
1838 qemu_fprintf(f, "\n");
1839 }
1840 qemu_fprintf(f, "\n\n");
1841 }
1842
1843 void mb_tcg_init(void)
1844 {
1845 int i;
1846
1847 env_debug = tcg_global_mem_new_i32(cpu_env,
1848 offsetof(CPUMBState, debug),
1849 "debug0");
1850 env_iflags = tcg_global_mem_new_i32(cpu_env,
1851 offsetof(CPUMBState, iflags),
1852 "iflags");
1853 env_imm = tcg_global_mem_new_i32(cpu_env,
1854 offsetof(CPUMBState, imm),
1855 "imm");
1856 cpu_btarget = tcg_global_mem_new_i32(cpu_env,
1857 offsetof(CPUMBState, btarget),
1858 "btarget");
1859 env_btaken = tcg_global_mem_new_i32(cpu_env,
1860 offsetof(CPUMBState, btaken),
1861 "btaken");
1862 env_res_addr = tcg_global_mem_new(cpu_env,
1863 offsetof(CPUMBState, res_addr),
1864 "res_addr");
1865 env_res_val = tcg_global_mem_new_i32(cpu_env,
1866 offsetof(CPUMBState, res_val),
1867 "res_val");
1868 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1869 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
1870 offsetof(CPUMBState, regs[i]),
1871 regnames[i]);
1872 }
1873
1874 cpu_pc =
1875 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, pc), "rpc");
1876 cpu_msr =
1877 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, msr), "rmsr");
1878 cpu_ear =
1879 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, ear), "rear");
1880 cpu_esr =
1881 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, esr), "resr");
1882 cpu_fsr =
1883 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, fsr), "rfsr");
1884 cpu_btr =
1885 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, btr), "rbtr");
1886 cpu_edr =
1887 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, edr), "redr");
1888 }
1889
1890 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1891 target_ulong *data)
1892 {
1893 env->pc = data[0];
1894 }