target/microblaze: Split the cpu_SR array
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36
37 #define SIM_COMPAT 0
38 #define DISAS_GNU 1
39 #define DISAS_MB 1
40 #if DISAS_MB && !SIM_COMPAT
41 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 # define LOG_DIS(...) do { } while (0)
44 #endif
45
46 #define D(x)
47
48 #define EXTRACT_FIELD(src, start, end) \
49 (((src) >> start) & ((1 << (end - start + 1)) - 1))
50
51 /* is_jmp field values */
52 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
56 static TCGv_i32 env_debug;
57 static TCGv_i32 cpu_R[32];
58 static TCGv_i64 cpu_pc;
59 static TCGv_i64 cpu_msr;
60 static TCGv_i64 cpu_ear;
61 static TCGv_i64 cpu_esr;
62 static TCGv_i64 cpu_fsr;
63 static TCGv_i64 cpu_btr;
64 static TCGv_i64 cpu_edr;
65 static TCGv_i32 env_imm;
66 static TCGv_i32 env_btaken;
67 static TCGv_i64 env_btarget;
68 static TCGv_i32 env_iflags;
69 static TCGv env_res_addr;
70 static TCGv_i32 env_res_val;
71
72 #include "exec/gen-icount.h"
73
74 /* This is the state at translation time. */
75 typedef struct DisasContext {
76 MicroBlazeCPU *cpu;
77 uint32_t pc;
78
79 /* Decoder. */
80 int type_b;
81 uint32_t ir;
82 uint8_t opcode;
83 uint8_t rd, ra, rb;
84 uint16_t imm;
85
86 unsigned int cpustate_changed;
87 unsigned int delayed_branch;
88 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
89 unsigned int clear_imm;
90 int is_jmp;
91
92 #define JMP_NOJMP 0
93 #define JMP_DIRECT 1
94 #define JMP_DIRECT_CC 2
95 #define JMP_INDIRECT 3
96 unsigned int jmp;
97 uint32_t jmp_pc;
98
99 int abort_at_next_insn;
100 struct TranslationBlock *tb;
101 int singlestep_enabled;
102 } DisasContext;
103
104 static const char *regnames[] =
105 {
106 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
107 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
108 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
109 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
110 };
111
112 static inline void t_sync_flags(DisasContext *dc)
113 {
114     /* Sync the TB-dependent flags between translator and runtime. */
115 if (dc->tb_flags != dc->synced_flags) {
116 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
117 dc->synced_flags = dc->tb_flags;
118 }
119 }
120
121 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
122 {
123 TCGv_i32 tmp = tcg_const_i32(index);
124
125 t_sync_flags(dc);
126 tcg_gen_movi_i64(cpu_pc, dc->pc);
127 gen_helper_raise_exception(cpu_env, tmp);
128 tcg_temp_free_i32(tmp);
129 dc->is_jmp = DISAS_UPDATE;
130 }
131
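/*
 * Only chain directly to the destination TB (goto_tb) when it lies on the
 * same guest page as the current TB, the usual constraint for direct
 * chaining in system mode; user-only builds can always chain.
 */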
132 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
133 {
134 #ifndef CONFIG_USER_ONLY
135 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
136 #else
137 return true;
138 #endif
139 }
140
141 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
142 {
143 if (use_goto_tb(dc, dest)) {
144 tcg_gen_goto_tb(n);
145 tcg_gen_movi_i64(cpu_pc, dest);
146 tcg_gen_exit_tb(dc->tb, n);
147 } else {
148 tcg_gen_movi_i64(cpu_pc, dest);
149 tcg_gen_exit_tb(NULL, 0);
150 }
151 }
152
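/*
 * The carry flag lives in MSR: read_carry extracts the MSR_CC copy kept in
 * bit 31, and write_carry below keeps both MSR_C (bit 2) and the MSR_CC
 * alias in sync.
 */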
153 static void read_carry(DisasContext *dc, TCGv_i32 d)
154 {
155 tcg_gen_extrl_i64_i32(d, cpu_msr);
156 tcg_gen_shri_i32(d, d, 31);
157 }
158
159 /*
160 * write_carry sets the carry bits in MSR based on bit 0 of v.
161 * v[31:1] are ignored.
162 */
163 static void write_carry(DisasContext *dc, TCGv_i32 v)
164 {
165 TCGv_i64 t0 = tcg_temp_new_i64();
166 tcg_gen_extu_i32_i64(t0, v);
167 /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
168 tcg_gen_deposit_i64(cpu_msr, cpu_msr, t0, 2, 1);
169 tcg_gen_deposit_i64(cpu_msr, cpu_msr, t0, 31, 1);
170 tcg_temp_free_i64(t0);
171 }
172
173 static void write_carryi(DisasContext *dc, bool carry)
174 {
175 TCGv_i32 t0 = tcg_temp_new_i32();
176 tcg_gen_movi_i32(t0, carry);
177 write_carry(dc, t0);
178 tcg_temp_free_i32(t0);
179 }
180
181 /*
182  * Returns true if the insn is an illegal operation.
183 * If exceptions are enabled, an exception is raised.
184 */
185 static bool trap_illegal(DisasContext *dc, bool cond)
186 {
187 if (cond && (dc->tb_flags & MSR_EE_FLAG)
188 && dc->cpu->cfg.illegal_opcode_exception) {
189 tcg_gen_movi_i64(cpu_esr, ESR_EC_ILLEGAL_OP);
190 t_gen_raise_exception(dc, EXCP_HW_EXCP);
191 }
192 return cond;
193 }
194
195 /*
196 * Returns true if the insn is illegal in userspace.
197 * If exceptions are enabled, an exception is raised.
198 */
199 static bool trap_userspace(DisasContext *dc, bool cond)
200 {
201 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
202 bool cond_user = cond && mem_index == MMU_USER_IDX;
203
204 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
205 tcg_gen_movi_i64(cpu_esr, ESR_EC_PRIVINSN);
206 t_gen_raise_exception(dc, EXCP_HW_EXCP);
207 }
208 return cond_user;
209 }
210
211 /* True if ALU operand b is a small immediate that may deserve
212 faster treatment. */
213 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
214 {
215 /* Immediate insn without the imm prefix ? */
216 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
217 }
218
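/*
 * Return the second ALU operand: rb for register forms, or env_imm for
 * type B forms, where env_imm holds either the sign-extended 16-bit
 * immediate or, after an IMM prefix, the full 32-bit immediate.
 */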
219 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
220 {
221 if (dc->type_b) {
222 if (dc->tb_flags & IMM_FLAG)
223 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
224 else
225 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
226 return &env_imm;
227 } else
228 return &cpu_R[dc->rb];
229 }
230
231 static void dec_add(DisasContext *dc)
232 {
233 unsigned int k, c;
234 TCGv_i32 cf;
235
236 k = dc->opcode & 4;
237 c = dc->opcode & 2;
238
239 LOG_DIS("add%s%s%s r%d r%d r%d\n",
240 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
241 dc->rd, dc->ra, dc->rb);
242
243 /* Take care of the easy cases first. */
244 if (k) {
245 /* k - keep carry, no need to update MSR. */
246 /* If rd == r0, it's a nop. */
247 if (dc->rd) {
248 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
249
250 if (c) {
251 /* c - Add carry into the result. */
252 cf = tcg_temp_new_i32();
253
254 read_carry(dc, cf);
255 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
256 tcg_temp_free_i32(cf);
257 }
258 }
259 return;
260 }
261
262 /* From now on, we can assume k is zero. So we need to update MSR. */
263 /* Extract carry. */
264 cf = tcg_temp_new_i32();
265 if (c) {
266 read_carry(dc, cf);
267 } else {
268 tcg_gen_movi_i32(cf, 0);
269 }
270
271 if (dc->rd) {
272 TCGv_i32 ncf = tcg_temp_new_i32();
273 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
274 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
275 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
276 write_carry(dc, ncf);
277 tcg_temp_free_i32(ncf);
278 } else {
279 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
280 write_carry(dc, cf);
281 }
282 tcg_temp_free_i32(cf);
283 }
284
285 static void dec_sub(DisasContext *dc)
286 {
287 unsigned int u, cmp, k, c;
288 TCGv_i32 cf, na;
289
290 u = dc->imm & 2;
291 k = dc->opcode & 4;
292 c = dc->opcode & 2;
293 cmp = (dc->imm & 1) && (!dc->type_b) && k;
294
295 if (cmp) {
296 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
297 if (dc->rd) {
298 if (u)
299 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
300 else
301 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
302 }
303 return;
304 }
305
306 LOG_DIS("sub%s%s r%d, r%d r%d\n",
307 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
308
309 /* Take care of the easy cases first. */
310 if (k) {
311 /* k - keep carry, no need to update MSR. */
312 /* If rd == r0, it's a nop. */
313 if (dc->rd) {
314 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
315
316 if (c) {
317 /* c - Add carry into the result. */
318 cf = tcg_temp_new_i32();
319
320 read_carry(dc, cf);
321 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
322 tcg_temp_free_i32(cf);
323 }
324 }
325 return;
326 }
327
328 /* From now on, we can assume k is zero. So we need to update MSR. */
329 /* Extract carry. And complement a into na. */
330 cf = tcg_temp_new_i32();
331 na = tcg_temp_new_i32();
332 if (c) {
333 read_carry(dc, cf);
334 } else {
335 tcg_gen_movi_i32(cf, 1);
336 }
337
338 /* d = b + ~a + c. carry defaults to 1. */
339 tcg_gen_not_i32(na, cpu_R[dc->ra]);
340
341 if (dc->rd) {
342 TCGv_i32 ncf = tcg_temp_new_i32();
343 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
344 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
345 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
346 write_carry(dc, ncf);
347 tcg_temp_free_i32(ncf);
348 } else {
349 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
350 write_carry(dc, cf);
351 }
352 tcg_temp_free_i32(cf);
353 tcg_temp_free_i32(na);
354 }
355
356 static void dec_pattern(DisasContext *dc)
357 {
358 unsigned int mode;
359
360 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
361 return;
362 }
363
364 mode = dc->opcode & 3;
365 switch (mode) {
366 case 0:
367 /* pcmpbf. */
368 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
369 if (dc->rd)
370 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
371 break;
372 case 2:
373 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
374 if (dc->rd) {
375 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
376 cpu_R[dc->ra], cpu_R[dc->rb]);
377 }
378 break;
379 case 3:
380 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
381 if (dc->rd) {
382 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
383 cpu_R[dc->ra], cpu_R[dc->rb]);
384 }
385 break;
386 default:
387 cpu_abort(CPU(dc->cpu),
388 "unsupported pattern insn opcode=%x\n", dc->opcode);
389 break;
390 }
391 }
392
393 static void dec_and(DisasContext *dc)
394 {
395 unsigned int not;
396
397 if (!dc->type_b && (dc->imm & (1 << 10))) {
398 dec_pattern(dc);
399 return;
400 }
401
402 not = dc->opcode & (1 << 1);
403 LOG_DIS("and%s\n", not ? "n" : "");
404
405 if (!dc->rd)
406 return;
407
408 if (not) {
409 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410 } else
411 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
412 }
413
414 static void dec_or(DisasContext *dc)
415 {
416 if (!dc->type_b && (dc->imm & (1 << 10))) {
417 dec_pattern(dc);
418 return;
419 }
420
421 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
422 if (dc->rd)
423 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
424 }
425
426 static void dec_xor(DisasContext *dc)
427 {
428 if (!dc->type_b && (dc->imm & (1 << 10))) {
429 dec_pattern(dc);
430 return;
431 }
432
433 LOG_DIS("xor r%d\n", dc->rd);
434 if (dc->rd)
435 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
436 }
437
438 static inline void msr_read(DisasContext *dc, TCGv_i32 d)
439 {
440 tcg_gen_extrl_i64_i32(d, cpu_msr);
441 }
442
443 static inline void msr_write(DisasContext *dc, TCGv_i32 v)
444 {
445 TCGv_i64 t;
446
447 t = tcg_temp_new_i64();
448 dc->cpustate_changed = 1;
449 /* PVR bit is not writable. */
450 tcg_gen_extu_i32_i64(t, v);
451 tcg_gen_andi_i64(t, t, ~MSR_PVR);
452 tcg_gen_andi_i64(cpu_msr, cpu_msr, MSR_PVR);
453 tcg_gen_or_i64(cpu_msr, cpu_msr, t);
454 tcg_temp_free_i64(t);
455 }
456
457 static void dec_msr(DisasContext *dc)
458 {
459 CPUState *cs = CPU(dc->cpu);
460 TCGv_i32 t0, t1;
461 unsigned int sr, rn;
462 bool to, clrset, extended = false;
463
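    /*
     * The immediate encodes the special register number in bits [13:0] and
     * the to-register (write) direction in bit 14; when bit 15 is clear the
     * insn is msrclr/msrset rather than mts/mfs.
     */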
464 sr = extract32(dc->imm, 0, 14);
465 to = extract32(dc->imm, 14, 1);
466 clrset = extract32(dc->imm, 15, 1) == 0;
467 dc->type_b = 1;
468 if (to) {
469 dc->cpustate_changed = 1;
470 }
471
472 /* Extended MSRs are only available if addr_size > 32. */
473 if (dc->cpu->cfg.addr_size > 32) {
474 /* The E-bit is encoded differently for To/From MSR. */
475 static const unsigned int e_bit[] = { 19, 24 };
476
477 extended = extract32(dc->imm, e_bit[to], 1);
478 }
479
480 /* msrclr and msrset. */
481 if (clrset) {
482 bool clr = extract32(dc->ir, 16, 1);
483
484 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
485 dc->rd, dc->imm);
486
487 if (!dc->cpu->cfg.use_msr_instr) {
488 /* nop??? */
489 return;
490 }
491
492 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
493 return;
494 }
495
496 if (dc->rd)
497 msr_read(dc, cpu_R[dc->rd]);
498
499 t0 = tcg_temp_new_i32();
500 t1 = tcg_temp_new_i32();
501 msr_read(dc, t0);
502 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
503
504 if (clr) {
505 tcg_gen_not_i32(t1, t1);
506 tcg_gen_and_i32(t0, t0, t1);
507 } else
508 tcg_gen_or_i32(t0, t0, t1);
509 msr_write(dc, t0);
510 tcg_temp_free_i32(t0);
511 tcg_temp_free_i32(t1);
512 tcg_gen_movi_i64(cpu_pc, dc->pc + 4);
513 dc->is_jmp = DISAS_UPDATE;
514 return;
515 }
516
517 if (trap_userspace(dc, to)) {
518 return;
519 }
520
521 #if !defined(CONFIG_USER_ONLY)
522 /* Catch read/writes to the mmu block. */
523 if ((sr & ~0xff) == 0x1000) {
524 TCGv_i32 tmp_ext = tcg_const_i32(extended);
525 TCGv_i32 tmp_sr;
526
527 sr &= 7;
528 tmp_sr = tcg_const_i32(sr);
529 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
530 if (to) {
531 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
532 } else {
533 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
534 }
535 tcg_temp_free_i32(tmp_sr);
536 tcg_temp_free_i32(tmp_ext);
537 return;
538 }
539 #endif
540
541 if (to) {
542 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
543 switch (sr) {
544 case SR_PC:
545 break;
546 case SR_MSR:
547 msr_write(dc, cpu_R[dc->ra]);
548 break;
549 case SR_EAR:
550 tcg_gen_extu_i32_i64(cpu_ear, cpu_R[dc->ra]);
551 break;
552 case SR_ESR:
553 tcg_gen_extu_i32_i64(cpu_esr, cpu_R[dc->ra]);
554 break;
555 case SR_FSR:
556 tcg_gen_extu_i32_i64(cpu_fsr, cpu_R[dc->ra]);
557 break;
558 case SR_BTR:
559 tcg_gen_extu_i32_i64(cpu_btr, cpu_R[dc->ra]);
560 break;
561 case SR_EDR:
562 tcg_gen_extu_i32_i64(cpu_edr, cpu_R[dc->ra]);
563 break;
564 case 0x800:
565 tcg_gen_st_i32(cpu_R[dc->ra],
566 cpu_env, offsetof(CPUMBState, slr));
567 break;
568 case 0x802:
569 tcg_gen_st_i32(cpu_R[dc->ra],
570 cpu_env, offsetof(CPUMBState, shr));
571 break;
572 default:
573 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
574 break;
575 }
576 } else {
577 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
578
579 switch (sr) {
580 case SR_PC:
581 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
582 break;
583 case SR_MSR:
584 msr_read(dc, cpu_R[dc->rd]);
585 break;
586 case SR_EAR:
587 if (extended) {
588 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_ear);
589 } else {
590 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_ear);
591 }
592 break;
593 case SR_ESR:
594 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_esr);
595 break;
596 case SR_FSR:
597 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_fsr);
598 break;
599 case SR_BTR:
600 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_btr);
601 break;
602 case SR_EDR:
603 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_edr);
604 break;
605 case 0x800:
606 tcg_gen_ld_i32(cpu_R[dc->rd],
607 cpu_env, offsetof(CPUMBState, slr));
608 break;
609 case 0x802:
610 tcg_gen_ld_i32(cpu_R[dc->rd],
611 cpu_env, offsetof(CPUMBState, shr));
612 break;
613 case 0x2000 ... 0x200c:
614 rn = sr & 0xf;
615 tcg_gen_ld_i32(cpu_R[dc->rd],
616 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
617 break;
618 default:
619 cpu_abort(cs, "unknown mfs reg %x\n", sr);
620 break;
621 }
622 }
623
624 if (dc->rd == 0) {
625 tcg_gen_movi_i32(cpu_R[0], 0);
626 }
627 }
628
629 /* Multiplier unit. */
630 static void dec_mul(DisasContext *dc)
631 {
632 TCGv_i32 tmp;
633 unsigned int subcode;
634
635 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
636 return;
637 }
638
639 subcode = dc->imm & 3;
640
641 if (dc->type_b) {
642 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
643 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
644 return;
645 }
646
647 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
648 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
649 /* nop??? */
650 }
651
652 tmp = tcg_temp_new_i32();
653 switch (subcode) {
654 case 0:
655 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
656 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
657 break;
658 case 1:
659 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
660 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
661 cpu_R[dc->ra], cpu_R[dc->rb]);
662 break;
663 case 2:
664 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
665 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
666 cpu_R[dc->ra], cpu_R[dc->rb]);
667 break;
668 case 3:
669 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
670 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
671 break;
672 default:
673 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
674 break;
675 }
676 tcg_temp_free_i32(tmp);
677 }
678
679 /* Div unit. */
680 static void dec_div(DisasContext *dc)
681 {
682 unsigned int u;
683
684 u = dc->imm & 2;
685 LOG_DIS("div\n");
686
687 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
688 return;
689 }
690
691 if (u)
692 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
693 cpu_R[dc->ra]);
694 else
695 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
696 cpu_R[dc->ra]);
697 if (!dc->rd)
698 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
699 }
700
701 static void dec_barrel(DisasContext *dc)
702 {
703 TCGv_i32 t0;
704 unsigned int imm_w, imm_s;
705 bool s, t, e = false, i = false;
706
707 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
708 return;
709 }
710
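    /*
     * Barrel shifter decode: i/e (immediate forms only) select bit-field
     * insert (bsifi) / extract (bsefi), s selects shift-left vs. shift-right,
     * and t selects arithmetic vs. logical right shifts; imm_w/imm_s give
     * the field width and shift amount.
     */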
711 if (dc->type_b) {
712 /* Insert and extract are only available in immediate mode. */
713 i = extract32(dc->imm, 15, 1);
714 e = extract32(dc->imm, 14, 1);
715 }
716 s = extract32(dc->imm, 10, 1);
717 t = extract32(dc->imm, 9, 1);
718 imm_w = extract32(dc->imm, 6, 5);
719 imm_s = extract32(dc->imm, 0, 5);
720
721 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
722 e ? "e" : "",
723 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
724
725 if (e) {
726 if (imm_w + imm_s > 32 || imm_w == 0) {
727             /* The behavior is undefined for these inputs. */
728 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
729 imm_w, imm_s);
730 } else {
731 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
732 }
733 } else if (i) {
734 int width = imm_w - imm_s + 1;
735
736 if (imm_w < imm_s) {
737             /* The behavior is undefined for these inputs. */
738 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
739 imm_w, imm_s);
740 } else {
741 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
742 imm_s, width);
743 }
744 } else {
745 t0 = tcg_temp_new_i32();
746
747 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
748 tcg_gen_andi_i32(t0, t0, 31);
749
750 if (s) {
751 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
752 } else {
753 if (t) {
754 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
755 } else {
756 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
757 }
758 }
759 tcg_temp_free_i32(t0);
760 }
761 }
762
763 static void dec_bit(DisasContext *dc)
764 {
765 CPUState *cs = CPU(dc->cpu);
766 TCGv_i32 t0;
767 unsigned int op;
768
769 op = dc->ir & ((1 << 9) - 1);
770 switch (op) {
771 case 0x21:
772 /* src. */
773 t0 = tcg_temp_new_i32();
774
775 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
776 tcg_gen_extrl_i64_i32(t0, cpu_msr);
777 tcg_gen_andi_i32(t0, t0, MSR_CC);
778 write_carry(dc, cpu_R[dc->ra]);
779 if (dc->rd) {
780 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
781 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
782 }
783 tcg_temp_free_i32(t0);
784 break;
785
786 case 0x1:
787 case 0x41:
788 /* srl. */
789 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
790
791 /* Update carry. Note that write carry only looks at the LSB. */
792 write_carry(dc, cpu_R[dc->ra]);
793 if (dc->rd) {
794 if (op == 0x41)
795 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
796 else
797 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
798 }
799 break;
800 case 0x60:
801 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
802 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
803 break;
804 case 0x61:
805 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
806 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
807 break;
808 case 0x64:
809 case 0x66:
810 case 0x74:
811 case 0x76:
812 /* wdc. */
813 LOG_DIS("wdc r%d\n", dc->ra);
814 trap_userspace(dc, true);
815 break;
816 case 0x68:
817 /* wic. */
818 LOG_DIS("wic r%d\n", dc->ra);
819 trap_userspace(dc, true);
820 break;
821 case 0xe0:
822 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
823 return;
824 }
825 if (dc->cpu->cfg.use_pcmp_instr) {
826 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
827 }
828 break;
829 case 0x1e0:
830 /* swapb */
831 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
832 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
833 break;
834 case 0x1e2:
835 /*swaph */
836 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
837 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
838 break;
839 default:
840 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
841 dc->pc, op, dc->rd, dc->ra, dc->rb);
842 break;
843 }
844 }
845
846 static inline void sync_jmpstate(DisasContext *dc)
847 {
848 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
849 if (dc->jmp == JMP_DIRECT) {
850 tcg_gen_movi_i32(env_btaken, 1);
851 }
852 dc->jmp = JMP_INDIRECT;
853 tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
854 }
855 }
856
857 static void dec_imm(DisasContext *dc)
858 {
859 LOG_DIS("imm %x\n", dc->imm << 16);
860 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
861 dc->tb_flags |= IMM_FLAG;
862 dc->clear_imm = 0;
863 }
864
865 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
866 {
867 bool extimm = dc->tb_flags & IMM_FLAG;
868     /* Set to true if a load/store uses r1, the stack pointer. */
869 bool stackprot = false;
870 TCGv_i32 t32;
871
872 /* All load/stores use ra. */
873 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
874 stackprot = true;
875 }
876
877 /* Treat the common cases first. */
878 if (!dc->type_b) {
879 if (ea) {
880 int addr_size = dc->cpu->cfg.addr_size;
881
882 if (addr_size == 32) {
883 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
884 return;
885 }
886
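            /* With addr_size > 32, ra:rb forms the wide address (ra is the upper word). */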
887 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
888 if (addr_size < 64) {
889 /* Mask off out of range bits. */
890 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
891 }
892 return;
893 }
894
895 /* If any of the regs is r0, set t to the value of the other reg. */
896 if (dc->ra == 0) {
897 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
898 return;
899 } else if (dc->rb == 0) {
900 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
901 return;
902 }
903
904 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
905 stackprot = true;
906 }
907
908 t32 = tcg_temp_new_i32();
909 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
910 tcg_gen_extu_i32_tl(t, t32);
911 tcg_temp_free_i32(t32);
912
913 if (stackprot) {
914 gen_helper_stackprot(cpu_env, t);
915 }
916 return;
917 }
918 /* Immediate. */
919 t32 = tcg_temp_new_i32();
920 if (!extimm) {
921 tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
922 } else {
923 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
924 }
925 tcg_gen_extu_i32_tl(t, t32);
926 tcg_temp_free_i32(t32);
927
928 if (stackprot) {
929 gen_helper_stackprot(cpu_env, t);
930 }
931 return;
932 }
933
934 static void dec_load(DisasContext *dc)
935 {
936 TCGv_i32 v;
937 TCGv addr;
938 unsigned int size;
939 bool rev = false, ex = false, ea = false;
940 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
941 MemOp mop;
942
943 mop = dc->opcode & 3;
944 size = 1 << mop;
945 if (!dc->type_b) {
946 ea = extract32(dc->ir, 7, 1);
947 rev = extract32(dc->ir, 9, 1);
948 ex = extract32(dc->ir, 10, 1);
949 }
950 mop |= MO_TE;
951 if (rev) {
952 mop ^= MO_BSWAP;
953 }
954
955 if (trap_illegal(dc, size > 4)) {
956 return;
957 }
958
959 if (trap_userspace(dc, ea)) {
960 return;
961 }
962
963 LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
964 ex ? "x" : "",
965 ea ? "ea" : "");
966
967 t_sync_flags(dc);
968 addr = tcg_temp_new();
969 compute_ldst_addr(dc, ea, addr);
970 /* Extended addressing bypasses the MMU. */
971 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
972
973 /*
974 * When doing reverse accesses we need to do two things.
975 *
976 * 1. Reverse the address wrt endianness.
977 * 2. Byteswap the data lanes on the way back into the CPU core.
978 */
979 if (rev && size != 4) {
980 /* Endian reverse the address. t is addr. */
981 switch (size) {
982 case 1:
983 {
984 tcg_gen_xori_tl(addr, addr, 3);
985 break;
986 }
987
988 case 2:
989 /* 00 -> 10
990 10 -> 00. */
991 tcg_gen_xori_tl(addr, addr, 2);
992 break;
993 default:
994 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
995 break;
996 }
997 }
998
999 /* lwx does not throw unaligned access errors, so force alignment */
1000 if (ex) {
1001 tcg_gen_andi_tl(addr, addr, ~3);
1002 }
1003
1004 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1005 sync_jmpstate(dc);
1006
1007 /* Verify alignment if needed. */
1008 /*
1009 * Microblaze gives MMU faults priority over faults due to
1010 * unaligned addresses. That's why we speculatively do the load
1011 * into v. If the load succeeds, we verify alignment of the
1012 * address and if that succeeds we write into the destination reg.
1013 */
1014 v = tcg_temp_new_i32();
1015 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
1016
1017 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1018 TCGv_i32 t0 = tcg_const_i32(0);
1019 TCGv_i32 treg = tcg_const_i32(dc->rd);
1020 TCGv_i32 tsize = tcg_const_i32(size - 1);
1021
1022 tcg_gen_movi_i64(cpu_pc, dc->pc);
1023 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1024
1025 tcg_temp_free_i32(t0);
1026 tcg_temp_free_i32(treg);
1027 tcg_temp_free_i32(tsize);
1028 }
1029
1030 if (ex) {
1031 tcg_gen_mov_tl(env_res_addr, addr);
1032 tcg_gen_mov_i32(env_res_val, v);
1033 }
1034 if (dc->rd) {
1035 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1036 }
1037 tcg_temp_free_i32(v);
1038
1039 if (ex) { /* lwx */
1040 /* no support for AXI exclusive so always clear C */
1041 write_carryi(dc, 0);
1042 }
1043
1044 tcg_temp_free(addr);
1045 }
1046
1047 static void dec_store(DisasContext *dc)
1048 {
1049 TCGv addr;
1050 TCGLabel *swx_skip = NULL;
1051 unsigned int size;
1052 bool rev = false, ex = false, ea = false;
1053 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1054 MemOp mop;
1055
1056 mop = dc->opcode & 3;
1057 size = 1 << mop;
1058 if (!dc->type_b) {
1059 ea = extract32(dc->ir, 7, 1);
1060 rev = extract32(dc->ir, 9, 1);
1061 ex = extract32(dc->ir, 10, 1);
1062 }
1063 mop |= MO_TE;
1064 if (rev) {
1065 mop ^= MO_BSWAP;
1066 }
1067
1068 if (trap_illegal(dc, size > 4)) {
1069 return;
1070 }
1071
1072 trap_userspace(dc, ea);
1073
1074 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1075 ex ? "x" : "",
1076 ea ? "ea" : "");
1077 t_sync_flags(dc);
1078 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1079 sync_jmpstate(dc);
1080 /* SWX needs a temp_local. */
1081 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1082 compute_ldst_addr(dc, ea, addr);
1083 /* Extended addressing bypasses the MMU. */
1084 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1085
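    /*
     * swx only performs the store when the lwx reservation is still valid:
     * the address must match env_res_addr and memory must still hold
     * env_res_val (checked with an atomic cmpxchg). Carry is set first and
     * only cleared on success.
     */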
1086 if (ex) { /* swx */
1087 TCGv_i32 tval;
1088
1089 /* swx does not throw unaligned access errors, so force alignment */
1090 tcg_gen_andi_tl(addr, addr, ~3);
1091
1092 write_carryi(dc, 1);
1093 swx_skip = gen_new_label();
1094 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1095
1096 /*
1097 * Compare the value loaded at lwx with current contents of
1098 * the reserved location.
1099 */
1100 tval = tcg_temp_new_i32();
1101
1102 tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
1103 cpu_R[dc->rd], mem_index,
1104 mop);
1105
1106 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1107 write_carryi(dc, 0);
1108 tcg_temp_free_i32(tval);
1109 }
1110
1111 if (rev && size != 4) {
1112 /* Endian reverse the address. t is addr. */
1113 switch (size) {
1114 case 1:
1115 {
1116 tcg_gen_xori_tl(addr, addr, 3);
1117 break;
1118 }
1119
1120 case 2:
1121 /* 00 -> 10
1122 10 -> 00. */
1123 /* Force addr into the temp. */
1124 tcg_gen_xori_tl(addr, addr, 2);
1125 break;
1126 default:
1127 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1128 break;
1129 }
1130 }
1131
1132 if (!ex) {
1133 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1134 }
1135
1136 /* Verify alignment if needed. */
1137 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1138 TCGv_i32 t1 = tcg_const_i32(1);
1139 TCGv_i32 treg = tcg_const_i32(dc->rd);
1140 TCGv_i32 tsize = tcg_const_i32(size - 1);
1141
1142 tcg_gen_movi_i64(cpu_pc, dc->pc);
1143 /* FIXME: if the alignment is wrong, we should restore the value
1144 * in memory. One possible way to achieve this is to probe
1145         *        the MMU prior to the memory access; that way we could put
1146 * the alignment checks in between the probe and the mem
1147 * access.
1148 */
1149 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1150
1151 tcg_temp_free_i32(t1);
1152 tcg_temp_free_i32(treg);
1153 tcg_temp_free_i32(tsize);
1154 }
1155
1156 if (ex) {
1157 gen_set_label(swx_skip);
1158 }
1159
1160 tcg_temp_free(addr);
1161 }
1162
1163 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1164 TCGv_i32 d, TCGv_i32 a)
1165 {
1166 static const int mb_to_tcg_cc[] = {
1167 [CC_EQ] = TCG_COND_EQ,
1168 [CC_NE] = TCG_COND_NE,
1169 [CC_LT] = TCG_COND_LT,
1170 [CC_LE] = TCG_COND_LE,
1171 [CC_GE] = TCG_COND_GE,
1172 [CC_GT] = TCG_COND_GT,
1173 };
1174
1175 switch (cc) {
1176 case CC_EQ:
1177 case CC_NE:
1178 case CC_LT:
1179 case CC_LE:
1180 case CC_GE:
1181 case CC_GT:
1182 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1183 break;
1184 default:
1185 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1186 break;
1187 }
1188 }
1189
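/* Set the next PC: pc_true if the branch was taken (env_btaken != 0), else pc_false. */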
1190 static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
1191 {
1192 TCGv_i64 tmp_btaken = tcg_temp_new_i64();
1193 TCGv_i64 tmp_zero = tcg_const_i64(0);
1194
1195 tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
1196 tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc,
1197 tmp_btaken, tmp_zero,
1198 pc_true, pc_false);
1199
1200 tcg_temp_free_i64(tmp_btaken);
1201 tcg_temp_free_i64(tmp_zero);
1202 }
1203
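/*
 * Mark the next insn as a delay slot: record whether the branch carried an
 * IMM prefix (bimm), arm the two-insn branch countdown and set D_FLAG in
 * the TB flags.
 */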
1204 static void dec_setup_dslot(DisasContext *dc)
1205 {
1206 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1207
1208 dc->delayed_branch = 2;
1209 dc->tb_flags |= D_FLAG;
1210
1211 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1212 tcg_temp_free_i32(tmp);
1213 }
1214
1215 static void dec_bcc(DisasContext *dc)
1216 {
1217 unsigned int cc;
1218 unsigned int dslot;
1219
1220 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1221 dslot = dc->ir & (1 << 25);
1222 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1223
1224 dc->delayed_branch = 1;
1225 if (dslot) {
1226 dec_setup_dslot(dc);
1227 }
1228
1229 if (dec_alu_op_b_is_small_imm(dc)) {
1230 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1231
1232 tcg_gen_movi_i64(env_btarget, dc->pc + offset);
1233 dc->jmp = JMP_DIRECT_CC;
1234 dc->jmp_pc = dc->pc + offset;
1235 } else {
1236 dc->jmp = JMP_INDIRECT;
1237 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1238 tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1239 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1240 }
1241 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
1242 }
1243
1244 static void dec_br(DisasContext *dc)
1245 {
1246 unsigned int dslot, link, abs, mbar;
1247
1248 dslot = dc->ir & (1 << 20);
1249 abs = dc->ir & (1 << 19);
1250 link = dc->ir & (1 << 18);
1251
1252 /* Memory barrier. */
1253 mbar = (dc->ir >> 16) & 31;
1254 if (mbar == 2 && dc->imm == 4) {
1255 uint16_t mbar_imm = dc->rd;
1256
1257 LOG_DIS("mbar %d\n", mbar_imm);
1258
1259 /* Data access memory barrier. */
1260 if ((mbar_imm & 2) == 0) {
1261 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1262 }
1263
1264 /* mbar IMM & 16 decodes to sleep. */
1265 if (mbar_imm & 16) {
1266 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1267 TCGv_i32 tmp_1 = tcg_const_i32(1);
1268
1269 LOG_DIS("sleep\n");
1270
1271 if (trap_userspace(dc, true)) {
1272 /* Sleep is a privileged instruction. */
1273 return;
1274 }
1275
1276 t_sync_flags(dc);
1277 tcg_gen_st_i32(tmp_1, cpu_env,
1278 -offsetof(MicroBlazeCPU, env)
1279 +offsetof(CPUState, halted));
1280 tcg_gen_movi_i64(cpu_pc, dc->pc + 4);
1281 gen_helper_raise_exception(cpu_env, tmp_hlt);
1282 tcg_temp_free_i32(tmp_hlt);
1283 tcg_temp_free_i32(tmp_1);
1284 return;
1285 }
1286 /* Break the TB. */
1287 dc->cpustate_changed = 1;
1288 return;
1289 }
1290
1291 LOG_DIS("br%s%s%s%s imm=%x\n",
1292 abs ? "a" : "", link ? "l" : "",
1293 dc->type_b ? "i" : "", dslot ? "d" : "",
1294 dc->imm);
1295
1296 dc->delayed_branch = 1;
1297 if (dslot) {
1298 dec_setup_dslot(dc);
1299 }
1300 if (link && dc->rd)
1301 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1302
1303 dc->jmp = JMP_INDIRECT;
1304 if (abs) {
1305 tcg_gen_movi_i32(env_btaken, 1);
1306 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1307 if (link && !dslot) {
1308 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1309 t_gen_raise_exception(dc, EXCP_BREAK);
1310 if (dc->imm == 0) {
1311 if (trap_userspace(dc, true)) {
1312 return;
1313 }
1314
1315 t_gen_raise_exception(dc, EXCP_DEBUG);
1316 }
1317 }
1318 } else {
1319 if (dec_alu_op_b_is_small_imm(dc)) {
1320 dc->jmp = JMP_DIRECT;
1321 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1322 } else {
1323 tcg_gen_movi_i32(env_btaken, 1);
1324 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1325 tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1326 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1327 }
1328 }
1329 }
1330
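/*
 * Return-from-interrupt/break/exception helpers. Each restores MSR by
 * copying the saved UMS/VMS bits back into UM/VM, and additionally sets IE
 * (rtid), clears BIP (rtbd) or sets EE and clears EIP (rted).
 */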
1331 static inline void do_rti(DisasContext *dc)
1332 {
1333 TCGv_i32 t0, t1;
1334 t0 = tcg_temp_new_i32();
1335 t1 = tcg_temp_new_i32();
1336 tcg_gen_extrl_i64_i32(t1, cpu_msr);
1337 tcg_gen_shri_i32(t0, t1, 1);
1338 tcg_gen_ori_i32(t1, t1, MSR_IE);
1339 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1340
1341 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1342 tcg_gen_or_i32(t1, t1, t0);
1343 msr_write(dc, t1);
1344 tcg_temp_free_i32(t1);
1345 tcg_temp_free_i32(t0);
1346 dc->tb_flags &= ~DRTI_FLAG;
1347 }
1348
1349 static inline void do_rtb(DisasContext *dc)
1350 {
1351 TCGv_i32 t0, t1;
1352 t0 = tcg_temp_new_i32();
1353 t1 = tcg_temp_new_i32();
1354 tcg_gen_extrl_i64_i32(t1, cpu_msr);
1355 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1356 tcg_gen_shri_i32(t0, t1, 1);
1357 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1358
1359 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1360 tcg_gen_or_i32(t1, t1, t0);
1361 msr_write(dc, t1);
1362 tcg_temp_free_i32(t1);
1363 tcg_temp_free_i32(t0);
1364 dc->tb_flags &= ~DRTB_FLAG;
1365 }
1366
1367 static inline void do_rte(DisasContext *dc)
1368 {
1369 TCGv_i32 t0, t1;
1370 t0 = tcg_temp_new_i32();
1371 t1 = tcg_temp_new_i32();
1372
1373 tcg_gen_extrl_i64_i32(t1, cpu_msr);
1374 tcg_gen_ori_i32(t1, t1, MSR_EE);
1375 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1376 tcg_gen_shri_i32(t0, t1, 1);
1377 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1378
1379 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1380 tcg_gen_or_i32(t1, t1, t0);
1381 msr_write(dc, t1);
1382 tcg_temp_free_i32(t1);
1383 tcg_temp_free_i32(t0);
1384 dc->tb_flags &= ~DRTE_FLAG;
1385 }
1386
1387 static void dec_rts(DisasContext *dc)
1388 {
1389 unsigned int b_bit, i_bit, e_bit;
1390 TCGv_i64 tmp64;
1391
1392 i_bit = dc->ir & (1 << 21);
1393 b_bit = dc->ir & (1 << 22);
1394 e_bit = dc->ir & (1 << 23);
1395
1396 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1397 return;
1398 }
1399
1400 dec_setup_dslot(dc);
1401
1402 if (i_bit) {
1403 LOG_DIS("rtid ir=%x\n", dc->ir);
1404 dc->tb_flags |= DRTI_FLAG;
1405 } else if (b_bit) {
1406 LOG_DIS("rtbd ir=%x\n", dc->ir);
1407 dc->tb_flags |= DRTB_FLAG;
1408 } else if (e_bit) {
1409 LOG_DIS("rted ir=%x\n", dc->ir);
1410 dc->tb_flags |= DRTE_FLAG;
1411 } else
1412 LOG_DIS("rts ir=%x\n", dc->ir);
1413
1414 dc->jmp = JMP_INDIRECT;
1415 tcg_gen_movi_i32(env_btaken, 1);
1416
1417 tmp64 = tcg_temp_new_i64();
1418 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1419 tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
1420 tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
1421 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1422 tcg_temp_free_i64(tmp64);
1423 }
1424
1425 static int dec_check_fpuv2(DisasContext *dc)
1426 {
1427 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1428 tcg_gen_movi_i64(cpu_esr, ESR_EC_FPU);
1429 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1430 }
1431 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1432 }
1433
1434 static void dec_fpu(DisasContext *dc)
1435 {
1436 unsigned int fpu_insn;
1437
1438 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1439 return;
1440 }
1441
1442 fpu_insn = (dc->ir >> 7) & 7;
1443
1444 switch (fpu_insn) {
1445 case 0:
1446 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1447 cpu_R[dc->rb]);
1448 break;
1449
1450 case 1:
1451 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1452 cpu_R[dc->rb]);
1453 break;
1454
1455 case 2:
1456 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1457 cpu_R[dc->rb]);
1458 break;
1459
1460 case 3:
1461 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1462 cpu_R[dc->rb]);
1463 break;
1464
1465 case 4:
1466 switch ((dc->ir >> 4) & 7) {
1467 case 0:
1468 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1469 cpu_R[dc->ra], cpu_R[dc->rb]);
1470 break;
1471 case 1:
1472 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1473 cpu_R[dc->ra], cpu_R[dc->rb]);
1474 break;
1475 case 2:
1476 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1477 cpu_R[dc->ra], cpu_R[dc->rb]);
1478 break;
1479 case 3:
1480 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1481 cpu_R[dc->ra], cpu_R[dc->rb]);
1482 break;
1483 case 4:
1484 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1485 cpu_R[dc->ra], cpu_R[dc->rb]);
1486 break;
1487 case 5:
1488 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1489 cpu_R[dc->ra], cpu_R[dc->rb]);
1490 break;
1491 case 6:
1492 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1493 cpu_R[dc->ra], cpu_R[dc->rb]);
1494 break;
1495 default:
1496 qemu_log_mask(LOG_UNIMP,
1497 "unimplemented fcmp fpu_insn=%x pc=%x"
1498 " opc=%x\n",
1499 fpu_insn, dc->pc, dc->opcode);
1500 dc->abort_at_next_insn = 1;
1501 break;
1502 }
1503 break;
1504
1505 case 5:
1506 if (!dec_check_fpuv2(dc)) {
1507 return;
1508 }
1509 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1510 break;
1511
1512 case 6:
1513 if (!dec_check_fpuv2(dc)) {
1514 return;
1515 }
1516 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1517 break;
1518
1519 case 7:
1520 if (!dec_check_fpuv2(dc)) {
1521 return;
1522 }
1523 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1524 break;
1525
1526 default:
1527 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1528 " opc=%x\n",
1529 fpu_insn, dc->pc, dc->opcode);
1530 dc->abort_at_next_insn = 1;
1531 break;
1532 }
1533 }
1534
1535 static void dec_null(DisasContext *dc)
1536 {
1537 if (trap_illegal(dc, true)) {
1538 return;
1539 }
1540 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1541 dc->abort_at_next_insn = 1;
1542 }
1543
1544 /* Insns connected to FSL or AXI stream attached devices. */
1545 static void dec_stream(DisasContext *dc)
1546 {
1547 TCGv_i32 t_id, t_ctrl;
1548 int ctrl;
1549
1550 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1551 dc->type_b ? "" : "d", dc->imm);
1552
1553 if (trap_userspace(dc, true)) {
1554 return;
1555 }
1556
1557 t_id = tcg_temp_new_i32();
1558 if (dc->type_b) {
1559 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1560 ctrl = dc->imm >> 10;
1561 } else {
1562 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1563 ctrl = dc->imm >> 5;
1564 }
1565
1566 t_ctrl = tcg_const_i32(ctrl);
1567
1568 if (dc->rd == 0) {
1569 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1570 } else {
1571 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1572 }
1573 tcg_temp_free_i32(t_id);
1574 tcg_temp_free_i32(t_ctrl);
1575 }
1576
1577 static struct decoder_info {
1578 struct {
1579 uint32_t bits;
1580 uint32_t mask;
1581 };
1582 void (*dec)(DisasContext *dc);
1583 } decinfo[] = {
1584 {DEC_ADD, dec_add},
1585 {DEC_SUB, dec_sub},
1586 {DEC_AND, dec_and},
1587 {DEC_XOR, dec_xor},
1588 {DEC_OR, dec_or},
1589 {DEC_BIT, dec_bit},
1590 {DEC_BARREL, dec_barrel},
1591 {DEC_LD, dec_load},
1592 {DEC_ST, dec_store},
1593 {DEC_IMM, dec_imm},
1594 {DEC_BR, dec_br},
1595 {DEC_BCC, dec_bcc},
1596 {DEC_RTS, dec_rts},
1597 {DEC_FPU, dec_fpu},
1598 {DEC_MUL, dec_mul},
1599 {DEC_DIV, dec_div},
1600 {DEC_MSR, dec_msr},
1601 {DEC_STREAM, dec_stream},
1602 {{0, 0}, dec_null}
1603 };
1604
1605 static inline void decode(DisasContext *dc, uint32_t ir)
1606 {
1607 int i;
1608
1609 dc->ir = ir;
1610 LOG_DIS("%8.8x\t", dc->ir);
1611
1612 if (ir == 0) {
1613 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
1614 /* Don't decode nop/zero instructions any further. */
1615 return;
1616 }
1617
1618     /* Bit 29 (bit 2 in MicroBlaze's MSB-first bit numbering) selects the type B (immediate) format. */
1619 dc->type_b = ir & (1 << 29);
1620
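    /*
     * Common field layout: the 6-bit opcode sits in ir[31:26], rd/ra/rb in
     * the next three 5-bit fields, and the 16-bit immediate in ir[15:0].
     */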
1621 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1622 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1623 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1624 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1625 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1626
1627 /* Large switch for all insns. */
1628 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1629 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1630 decinfo[i].dec(dc);
1631 break;
1632 }
1633 }
1634 }
1635
1636 /* generate intermediate code for basic block 'tb'. */
1637 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1638 {
1639 CPUMBState *env = cs->env_ptr;
1640 MicroBlazeCPU *cpu = env_archcpu(env);
1641 uint32_t pc_start;
1642 struct DisasContext ctx;
1643 struct DisasContext *dc = &ctx;
1644 uint32_t page_start, org_flags;
1645 uint32_t npc;
1646 int num_insns;
1647
1648 pc_start = tb->pc;
1649 dc->cpu = cpu;
1650 dc->tb = tb;
1651 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1652
1653 dc->is_jmp = DISAS_NEXT;
1654 dc->jmp = 0;
1655 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1656 if (dc->delayed_branch) {
1657 dc->jmp = JMP_INDIRECT;
1658 }
1659 dc->pc = pc_start;
1660 dc->singlestep_enabled = cs->singlestep_enabled;
1661 dc->cpustate_changed = 0;
1662 dc->abort_at_next_insn = 0;
1663
1664 if (pc_start & 3) {
1665 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1666 }
1667
1668 page_start = pc_start & TARGET_PAGE_MASK;
1669 num_insns = 0;
1670
1671 gen_tb_start(tb);
1672 do
1673 {
1674 tcg_gen_insn_start(dc->pc);
1675 num_insns++;
1676
1677 #if SIM_COMPAT
1678 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1679 tcg_gen_movi_i64(cpu_pc, dc->pc);
1680 gen_helper_debug();
1681 }
1682 #endif
1683
1684 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1685 t_gen_raise_exception(dc, EXCP_DEBUG);
1686 dc->is_jmp = DISAS_UPDATE;
1687 /* The address covered by the breakpoint must be included in
1688            [tb->pc, tb->pc + tb->size) in order for it to be
1689 properly cleared -- thus we increment the PC here so that
1690 the logic setting tb->size below does the right thing. */
1691 dc->pc += 4;
1692 break;
1693 }
1694
1695 /* Pretty disas. */
1696 LOG_DIS("%8.8x:\t", dc->pc);
1697
1698 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1699 gen_io_start();
1700 }
1701
1702 dc->clear_imm = 1;
1703 decode(dc, cpu_ldl_code(env, dc->pc));
1704 if (dc->clear_imm)
1705 dc->tb_flags &= ~IMM_FLAG;
1706 dc->pc += 4;
1707
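        /*
         * Branches resolve once delayed_branch reaches zero: right away for
         * branches without a delay slot, otherwise after the delay-slot insn
         * has just been translated.
         */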
1708 if (dc->delayed_branch) {
1709 dc->delayed_branch--;
1710 if (!dc->delayed_branch) {
1711 if (dc->tb_flags & DRTI_FLAG)
1712 do_rti(dc);
1713 if (dc->tb_flags & DRTB_FLAG)
1714 do_rtb(dc);
1715 if (dc->tb_flags & DRTE_FLAG)
1716 do_rte(dc);
1717 /* Clear the delay slot flag. */
1718 dc->tb_flags &= ~D_FLAG;
1719 /* If it is a direct jump, try direct chaining. */
1720 if (dc->jmp == JMP_INDIRECT) {
1721 TCGv_i64 tmp_pc = tcg_const_i64(dc->pc);
1722 eval_cond_jmp(dc, env_btarget, tmp_pc);
1723 tcg_temp_free_i64(tmp_pc);
1724
1725 dc->is_jmp = DISAS_JUMP;
1726 } else if (dc->jmp == JMP_DIRECT) {
1727 t_sync_flags(dc);
1728 gen_goto_tb(dc, 0, dc->jmp_pc);
1729 dc->is_jmp = DISAS_TB_JUMP;
1730 } else if (dc->jmp == JMP_DIRECT_CC) {
1731 TCGLabel *l1 = gen_new_label();
1732 t_sync_flags(dc);
1733 /* Conditional jmp. */
1734 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
1735 gen_goto_tb(dc, 1, dc->pc);
1736 gen_set_label(l1);
1737 gen_goto_tb(dc, 0, dc->jmp_pc);
1738
1739 dc->is_jmp = DISAS_TB_JUMP;
1740 }
1741 break;
1742 }
1743 }
1744 if (cs->singlestep_enabled) {
1745 break;
1746 }
1747 } while (!dc->is_jmp && !dc->cpustate_changed
1748 && !tcg_op_buf_full()
1749 && !singlestep
1750 && (dc->pc - page_start < TARGET_PAGE_SIZE)
1751 && num_insns < max_insns);
1752
1753 npc = dc->pc;
1754 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1755 if (dc->tb_flags & D_FLAG) {
1756 dc->is_jmp = DISAS_UPDATE;
1757 tcg_gen_movi_i64(cpu_pc, npc);
1758 sync_jmpstate(dc);
1759 } else
1760 npc = dc->jmp_pc;
1761 }
1762
1763 /* Force an update if the per-tb cpu state has changed. */
1764 if (dc->is_jmp == DISAS_NEXT
1765 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1766 dc->is_jmp = DISAS_UPDATE;
1767 tcg_gen_movi_i64(cpu_pc, npc);
1768 }
1769 t_sync_flags(dc);
1770
1771 if (unlikely(cs->singlestep_enabled)) {
1772 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1773
1774 if (dc->is_jmp != DISAS_JUMP) {
1775 tcg_gen_movi_i64(cpu_pc, npc);
1776 }
1777 gen_helper_raise_exception(cpu_env, tmp);
1778 tcg_temp_free_i32(tmp);
1779 } else {
1780 switch(dc->is_jmp) {
1781 case DISAS_NEXT:
1782 gen_goto_tb(dc, 1, npc);
1783 break;
1784 default:
1785 case DISAS_JUMP:
1786 case DISAS_UPDATE:
1787 /* indicate that the hash table must be used
1788 to find the next TB */
1789 tcg_gen_exit_tb(NULL, 0);
1790 break;
1791 case DISAS_TB_JUMP:
1792 /* nothing more to generate */
1793 break;
1794 }
1795 }
1796 gen_tb_end(tb, num_insns);
1797
1798 tb->size = dc->pc - pc_start;
1799 tb->icount = num_insns;
1800
1801 #ifdef DEBUG_DISAS
1802 #if !SIM_COMPAT
1803 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1804 && qemu_log_in_addr_range(pc_start)) {
1805 FILE *logfile = qemu_log_lock();
1806 qemu_log("--------------\n");
1807 log_target_disas(cs, pc_start, dc->pc - pc_start);
1808 qemu_log_unlock(logfile);
1809 }
1810 #endif
1811 #endif
1812 assert(!dc->abort_at_next_insn);
1813 }
1814
1815 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1816 {
1817 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1818 CPUMBState *env = &cpu->env;
1819 int i;
1820
1821 if (!env) {
1822 return;
1823 }
1824
1825 qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
1826 env->pc, lookup_symbol(env->pc));
1827 qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
1828 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 " "
1829 "rbtr=%" PRIx64 "\n",
1830 env->msr, env->esr, env->ear,
1831 env->debug, env->imm, env->iflags, env->fsr,
1832 env->btr);
1833 qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
1834 "eip=%d ie=%d\n",
1835 env->btaken, env->btarget,
1836 (env->msr & MSR_UM) ? "user" : "kernel",
1837 (env->msr & MSR_UMS) ? "user" : "kernel",
1838 (bool)(env->msr & MSR_EIP),
1839 (bool)(env->msr & MSR_IE));
1840 for (i = 0; i < 12; i++) {
1841 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1842 if ((i + 1) % 4 == 0) {
1843 qemu_fprintf(f, "\n");
1844 }
1845 }
1846
1847 /* Registers that aren't modeled are reported as 0 */
1848 qemu_fprintf(f, "redr=%" PRIx64 " rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1849 "rtlblo=0 rtlbhi=0\n", env->edr);
1850 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1851 for (i = 0; i < 32; i++) {
1852 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1853 if ((i + 1) % 4 == 0)
1854 qemu_fprintf(f, "\n");
1855 }
1856 qemu_fprintf(f, "\n\n");
1857 }
1858
1859 void mb_tcg_init(void)
1860 {
1861 int i;
1862
1863 env_debug = tcg_global_mem_new_i32(cpu_env,
1864 offsetof(CPUMBState, debug),
1865 "debug0");
1866 env_iflags = tcg_global_mem_new_i32(cpu_env,
1867 offsetof(CPUMBState, iflags),
1868 "iflags");
1869 env_imm = tcg_global_mem_new_i32(cpu_env,
1870 offsetof(CPUMBState, imm),
1871 "imm");
1872 env_btarget = tcg_global_mem_new_i64(cpu_env,
1873 offsetof(CPUMBState, btarget),
1874 "btarget");
1875 env_btaken = tcg_global_mem_new_i32(cpu_env,
1876 offsetof(CPUMBState, btaken),
1877 "btaken");
1878 env_res_addr = tcg_global_mem_new(cpu_env,
1879 offsetof(CPUMBState, res_addr),
1880 "res_addr");
1881 env_res_val = tcg_global_mem_new_i32(cpu_env,
1882 offsetof(CPUMBState, res_val),
1883 "res_val");
1884 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1885 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
1886 offsetof(CPUMBState, regs[i]),
1887 regnames[i]);
1888 }
1889
1890 cpu_pc =
1891 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, pc), "rpc");
1892 cpu_msr =
1893 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, msr), "rmsr");
1894 cpu_ear =
1895 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, ear), "rear");
1896 cpu_esr =
1897 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, esr), "resr");
1898 cpu_fsr =
1899 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, fsr), "rfsr");
1900 cpu_btr =
1901 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, btr), "rbtr");
1902 cpu_edr =
1903 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, edr), "redr");
1904 }
1905
1906 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1907 target_ulong *data)
1908 {
1909 env->pc = data[0];
1910 }