target/microblaze: Split out EDR from env->sregs
target/microblaze/translate.c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36
37 #define SIM_COMPAT 0
38 #define DISAS_GNU 1
39 #define DISAS_MB 1
40 #if DISAS_MB && !SIM_COMPAT
41 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 # define LOG_DIS(...) do { } while (0)
44 #endif
45
46 #define D(x)
47
48 #define EXTRACT_FIELD(src, start, end) \
49 (((src) >> start) & ((1 << (end - start + 1)) - 1))
50
51 /* is_jmp field values */
52 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
56 static TCGv_i32 env_debug;
57 static TCGv_i32 cpu_R[32];
58 static TCGv_i64 cpu_SR[14];
59 static TCGv_i32 env_imm;
60 static TCGv_i32 env_btaken;
61 static TCGv_i64 env_btarget;
62 static TCGv_i32 env_iflags;
63 static TCGv env_res_addr;
64 static TCGv_i32 env_res_val;
65
66 #include "exec/gen-icount.h"
67
68 /* This is the state at translation time. */
69 typedef struct DisasContext {
70 MicroBlazeCPU *cpu;
71 uint32_t pc;
72
73 /* Decoder. */
74 int type_b;
75 uint32_t ir;
76 uint8_t opcode;
77 uint8_t rd, ra, rb;
78 uint16_t imm;
79
80 unsigned int cpustate_changed;
81 unsigned int delayed_branch;
82 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
83 unsigned int clear_imm;
84 int is_jmp;
85
86 #define JMP_NOJMP 0
87 #define JMP_DIRECT 1
88 #define JMP_DIRECT_CC 2
89 #define JMP_INDIRECT 3
90 unsigned int jmp;
91 uint32_t jmp_pc;
92
93 int abort_at_next_insn;
94 struct TranslationBlock *tb;
95 int singlestep_enabled;
96 } DisasContext;
97
98 static const char *regnames[] =
99 {
100 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
101 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
102 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
103 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
104 };
105
106 static inline void t_sync_flags(DisasContext *dc)
107 {
108 /* Synch the tb dependent flags between translator and runtime. */
109 if (dc->tb_flags != dc->synced_flags) {
110 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
111 dc->synced_flags = dc->tb_flags;
112 }
113 }
114
115 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
116 {
117 TCGv_i32 tmp = tcg_const_i32(index);
118
119 t_sync_flags(dc);
120 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
121 gen_helper_raise_exception(cpu_env, tmp);
122 tcg_temp_free_i32(tmp);
123 dc->is_jmp = DISAS_UPDATE;
124 }
125
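/* Direct TB chaining is only safe when the branch target is on the same page
   as the current TB (user-only builds can always chain). */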
126 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
127 {
128 #ifndef CONFIG_USER_ONLY
129 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
130 #else
131 return true;
132 #endif
133 }
134
135 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
136 {
137 if (use_goto_tb(dc, dest)) {
138 tcg_gen_goto_tb(n);
139 tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
140 tcg_gen_exit_tb(dc->tb, n);
141 } else {
142 tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
143 tcg_gen_exit_tb(NULL, 0);
144 }
145 }
146
147 static void read_carry(DisasContext *dc, TCGv_i32 d)
148 {
149 tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
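    /* The carry flag is mirrored in MSR bit 31 (MSR_CC); extract it from there. */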
150 tcg_gen_shri_i32(d, d, 31);
151 }
152
153 /*
154 * write_carry sets the carry bits in MSR based on bit 0 of v.
155 * v[31:1] are ignored.
156 */
157 static void write_carry(DisasContext *dc, TCGv_i32 v)
158 {
159 TCGv_i64 t0 = tcg_temp_new_i64();
160 tcg_gen_extu_i32_i64(t0, v);
161 /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
162 tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
163 tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
164 tcg_temp_free_i64(t0);
165 }
166
167 static void write_carryi(DisasContext *dc, bool carry)
168 {
169 TCGv_i32 t0 = tcg_temp_new_i32();
170 tcg_gen_movi_i32(t0, carry);
171 write_carry(dc, t0);
172 tcg_temp_free_i32(t0);
173 }
174
175 /*
176  * Returns true if the insn is an illegal operation.
177 * If exceptions are enabled, an exception is raised.
178 */
179 static bool trap_illegal(DisasContext *dc, bool cond)
180 {
181 if (cond && (dc->tb_flags & MSR_EE_FLAG)
182 && dc->cpu->cfg.illegal_opcode_exception) {
183 tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
184 t_gen_raise_exception(dc, EXCP_HW_EXCP);
185 }
186 return cond;
187 }
188
189 /*
190 * Returns true if the insn is illegal in userspace.
191 * If exceptions are enabled, an exception is raised.
192 */
193 static bool trap_userspace(DisasContext *dc, bool cond)
194 {
195 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
196 bool cond_user = cond && mem_index == MMU_USER_IDX;
197
198 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
199 tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
200 t_gen_raise_exception(dc, EXCP_HW_EXCP);
201 }
202 return cond_user;
203 }
204
205 /* True if ALU operand b is a small immediate that may deserve
206 faster treatment. */
207 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
208 {
209     /* Immediate insn without the imm prefix? */
210 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
211 }
212
213 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
214 {
215 if (dc->type_b) {
216 if (dc->tb_flags & IMM_FLAG)
217 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
218 else
219 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
220 return &env_imm;
221 } else
222 return &cpu_R[dc->rb];
223 }
224
225 static void dec_add(DisasContext *dc)
226 {
227 unsigned int k, c;
228 TCGv_i32 cf;
229
230 k = dc->opcode & 4;
231 c = dc->opcode & 2;
232
233 LOG_DIS("add%s%s%s r%d r%d r%d\n",
234 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
235 dc->rd, dc->ra, dc->rb);
236
237 /* Take care of the easy cases first. */
238 if (k) {
239 /* k - keep carry, no need to update MSR. */
240 /* If rd == r0, it's a nop. */
241 if (dc->rd) {
242 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
243
244 if (c) {
245 /* c - Add carry into the result. */
246 cf = tcg_temp_new_i32();
247
248 read_carry(dc, cf);
249 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
250 tcg_temp_free_i32(cf);
251 }
252 }
253 return;
254 }
255
256 /* From now on, we can assume k is zero. So we need to update MSR. */
257 /* Extract carry. */
258 cf = tcg_temp_new_i32();
259 if (c) {
260 read_carry(dc, cf);
261 } else {
262 tcg_gen_movi_i32(cf, 0);
263 }
264
265 if (dc->rd) {
266 TCGv_i32 ncf = tcg_temp_new_i32();
267 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
268 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
269 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
270 write_carry(dc, ncf);
271 tcg_temp_free_i32(ncf);
272 } else {
273 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
274 write_carry(dc, cf);
275 }
276 tcg_temp_free_i32(cf);
277 }
278
279 static void dec_sub(DisasContext *dc)
280 {
281 unsigned int u, cmp, k, c;
282 TCGv_i32 cf, na;
283
284 u = dc->imm & 2;
285 k = dc->opcode & 4;
286 c = dc->opcode & 2;
287 cmp = (dc->imm & 1) && (!dc->type_b) && k;
288
289 if (cmp) {
290 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
291 if (dc->rd) {
292 if (u)
293 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
294 else
295 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
296 }
297 return;
298 }
299
300 LOG_DIS("sub%s%s r%d, r%d r%d\n",
301 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
302
303 /* Take care of the easy cases first. */
304 if (k) {
305 /* k - keep carry, no need to update MSR. */
306 /* If rd == r0, it's a nop. */
307 if (dc->rd) {
308 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
309
310 if (c) {
311 /* c - Add carry into the result. */
312 cf = tcg_temp_new_i32();
313
314 read_carry(dc, cf);
315 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
316 tcg_temp_free_i32(cf);
317 }
318 }
319 return;
320 }
321
322 /* From now on, we can assume k is zero. So we need to update MSR. */
323 /* Extract carry. And complement a into na. */
324 cf = tcg_temp_new_i32();
325 na = tcg_temp_new_i32();
326 if (c) {
327 read_carry(dc, cf);
328 } else {
329 tcg_gen_movi_i32(cf, 1);
330 }
331
332 /* d = b + ~a + c. carry defaults to 1. */
333 tcg_gen_not_i32(na, cpu_R[dc->ra]);
334
335 if (dc->rd) {
336 TCGv_i32 ncf = tcg_temp_new_i32();
337 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
338 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
339 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
340 write_carry(dc, ncf);
341 tcg_temp_free_i32(ncf);
342 } else {
343 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
344 write_carry(dc, cf);
345 }
346 tcg_temp_free_i32(cf);
347 tcg_temp_free_i32(na);
348 }
349
350 static void dec_pattern(DisasContext *dc)
351 {
352 unsigned int mode;
353
354 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
355 return;
356 }
357
358 mode = dc->opcode & 3;
359 switch (mode) {
360 case 0:
361 /* pcmpbf. */
362 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
363 if (dc->rd)
364 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
365 break;
366 case 2:
367 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
368 if (dc->rd) {
369 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
370 cpu_R[dc->ra], cpu_R[dc->rb]);
371 }
372 break;
373 case 3:
374 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
375 if (dc->rd) {
376 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
377 cpu_R[dc->ra], cpu_R[dc->rb]);
378 }
379 break;
380 default:
381 cpu_abort(CPU(dc->cpu),
382 "unsupported pattern insn opcode=%x\n", dc->opcode);
383 break;
384 }
385 }
386
387 static void dec_and(DisasContext *dc)
388 {
389 unsigned int not;
390
391 if (!dc->type_b && (dc->imm & (1 << 10))) {
392 dec_pattern(dc);
393 return;
394 }
395
396 not = dc->opcode & (1 << 1);
397 LOG_DIS("and%s\n", not ? "n" : "");
398
399 if (!dc->rd)
400 return;
401
402 if (not) {
403 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
404 } else
405 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
406 }
407
408 static void dec_or(DisasContext *dc)
409 {
410 if (!dc->type_b && (dc->imm & (1 << 10))) {
411 dec_pattern(dc);
412 return;
413 }
414
415 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
416 if (dc->rd)
417 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
418 }
419
420 static void dec_xor(DisasContext *dc)
421 {
422 if (!dc->type_b && (dc->imm & (1 << 10))) {
423 dec_pattern(dc);
424 return;
425 }
426
427 LOG_DIS("xor r%d\n", dc->rd);
428 if (dc->rd)
429 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
430 }
431
432 static inline void msr_read(DisasContext *dc, TCGv_i32 d)
433 {
434 tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
435 }
436
437 static inline void msr_write(DisasContext *dc, TCGv_i32 v)
438 {
439 TCGv_i64 t;
440
441 t = tcg_temp_new_i64();
442 dc->cpustate_changed = 1;
443 /* PVR bit is not writable. */
444 tcg_gen_extu_i32_i64(t, v);
445 tcg_gen_andi_i64(t, t, ~MSR_PVR);
446 tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
447 tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
448 tcg_temp_free_i64(t);
449 }
450
451 static void dec_msr(DisasContext *dc)
452 {
453 CPUState *cs = CPU(dc->cpu);
454 TCGv_i32 t0, t1;
455 unsigned int sr, rn;
456 bool to, clrset, extended = false;
457
458 sr = extract32(dc->imm, 0, 14);
459 to = extract32(dc->imm, 14, 1);
460 clrset = extract32(dc->imm, 15, 1) == 0;
461 dc->type_b = 1;
462 if (to) {
463 dc->cpustate_changed = 1;
464 }
465
466 /* Extended MSRs are only available if addr_size > 32. */
467 if (dc->cpu->cfg.addr_size > 32) {
468 /* The E-bit is encoded differently for To/From MSR. */
469 static const unsigned int e_bit[] = { 19, 24 };
470
471 extended = extract32(dc->imm, e_bit[to], 1);
472 }
473
474 /* msrclr and msrset. */
475 if (clrset) {
476 bool clr = extract32(dc->ir, 16, 1);
477
478 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
479 dc->rd, dc->imm);
480
481 if (!dc->cpu->cfg.use_msr_instr) {
482 /* nop??? */
483 return;
484 }
485
486 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
487 return;
488 }
489
490 if (dc->rd)
491 msr_read(dc, cpu_R[dc->rd]);
492
493 t0 = tcg_temp_new_i32();
494 t1 = tcg_temp_new_i32();
495 msr_read(dc, t0);
496 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
497
498 if (clr) {
499 tcg_gen_not_i32(t1, t1);
500 tcg_gen_and_i32(t0, t0, t1);
501 } else
502 tcg_gen_or_i32(t0, t0, t1);
503 msr_write(dc, t0);
504 tcg_temp_free_i32(t0);
505 tcg_temp_free_i32(t1);
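        /* The MSR change may affect subsequent insns, so end the TB here. */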
506 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
507 dc->is_jmp = DISAS_UPDATE;
508 return;
509 }
510
511 if (trap_userspace(dc, to)) {
512 return;
513 }
514
515 #if !defined(CONFIG_USER_ONLY)
516 /* Catch read/writes to the mmu block. */
517 if ((sr & ~0xff) == 0x1000) {
518 TCGv_i32 tmp_ext = tcg_const_i32(extended);
519 TCGv_i32 tmp_sr;
520
521 sr &= 7;
522 tmp_sr = tcg_const_i32(sr);
523 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
524 if (to) {
525 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
526 } else {
527 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
528 }
529 tcg_temp_free_i32(tmp_sr);
530 tcg_temp_free_i32(tmp_ext);
531 return;
532 }
533 #endif
534
535 if (to) {
536 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
537 switch (sr) {
538 case 0:
539 break;
540 case 1:
541 msr_write(dc, cpu_R[dc->ra]);
542 break;
543 case SR_EAR:
544 case SR_ESR:
545 case SR_FSR:
546 tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
547 break;
548 case 0x800:
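            /* slr - stack protection low register. */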
549 tcg_gen_st_i32(cpu_R[dc->ra],
550 cpu_env, offsetof(CPUMBState, slr));
551 break;
552 case 0x802:
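            /* shr - stack protection high register. */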
553 tcg_gen_st_i32(cpu_R[dc->ra],
554 cpu_env, offsetof(CPUMBState, shr));
555 break;
556 default:
557 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
558 break;
559 }
560 } else {
561 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
562
563 switch (sr) {
564 case 0:
565 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
566 break;
567 case 1:
568 msr_read(dc, cpu_R[dc->rd]);
569 break;
570 case SR_EAR:
571 if (extended) {
572 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
573 break;
574 }
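            /* fall through */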
575 case SR_ESR:
576 case SR_FSR:
577 case SR_BTR:
578 case SR_EDR:
579 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
580 break;
581 case 0x800:
582 tcg_gen_ld_i32(cpu_R[dc->rd],
583 cpu_env, offsetof(CPUMBState, slr));
584 break;
585 case 0x802:
586 tcg_gen_ld_i32(cpu_R[dc->rd],
587 cpu_env, offsetof(CPUMBState, shr));
588 break;
589 case 0x2000 ... 0x200c:
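            /* Processor version registers PVR0 - PVR12. */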
590 rn = sr & 0xf;
591 tcg_gen_ld_i32(cpu_R[dc->rd],
592 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
593 break;
594 default:
595 cpu_abort(cs, "unknown mfs reg %x\n", sr);
596 break;
597 }
598 }
599
600 if (dc->rd == 0) {
601 tcg_gen_movi_i32(cpu_R[0], 0);
602 }
603 }
604
605 /* Multiplier unit. */
606 static void dec_mul(DisasContext *dc)
607 {
608 TCGv_i32 tmp;
609 unsigned int subcode;
610
611 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
612 return;
613 }
614
615 subcode = dc->imm & 3;
616
617 if (dc->type_b) {
618 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
619 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
620 return;
621 }
622
623 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
624 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
625 /* nop??? */
626 }
627
628 tmp = tcg_temp_new_i32();
629 switch (subcode) {
630 case 0:
631 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
632 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
633 break;
634 case 1:
635 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
636 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
637 cpu_R[dc->ra], cpu_R[dc->rb]);
638 break;
639 case 2:
640 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
641 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
642 cpu_R[dc->ra], cpu_R[dc->rb]);
643 break;
644 case 3:
645 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
646 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
647 break;
648 default:
649 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
650 break;
651 }
652 tcg_temp_free_i32(tmp);
653 }
654
655 /* Div unit. */
656 static void dec_div(DisasContext *dc)
657 {
658 unsigned int u;
659
660 u = dc->imm & 2;
661 LOG_DIS("div\n");
662
663 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
664 return;
665 }
666
667 if (u)
668 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
669 cpu_R[dc->ra]);
670 else
671 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
672 cpu_R[dc->ra]);
673 if (!dc->rd)
674 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
675 }
676
677 static void dec_barrel(DisasContext *dc)
678 {
679 TCGv_i32 t0;
680 unsigned int imm_w, imm_s;
681 bool s, t, e = false, i = false;
682
683 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
684 return;
685 }
686
687 if (dc->type_b) {
688 /* Insert and extract are only available in immediate mode. */
689 i = extract32(dc->imm, 15, 1);
690 e = extract32(dc->imm, 14, 1);
691 }
692 s = extract32(dc->imm, 10, 1);
693 t = extract32(dc->imm, 9, 1);
694 imm_w = extract32(dc->imm, 6, 5);
695 imm_s = extract32(dc->imm, 0, 5);
696
697 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
698 e ? "e" : "",
699 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
700
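    /* bsefi extracts a bit field, bsifi inserts one; otherwise this is a plain barrel shift. */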
701 if (e) {
702 if (imm_w + imm_s > 32 || imm_w == 0) {
703             /* These inputs have undefined behavior.  */
704 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
705 imm_w, imm_s);
706 } else {
707 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
708 }
709 } else if (i) {
710 int width = imm_w - imm_s + 1;
711
712 if (imm_w < imm_s) {
713             /* These inputs have undefined behavior.  */
714 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
715 imm_w, imm_s);
716 } else {
717 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
718 imm_s, width);
719 }
720 } else {
721 t0 = tcg_temp_new_i32();
722
723 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
724 tcg_gen_andi_i32(t0, t0, 31);
725
726 if (s) {
727 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
728 } else {
729 if (t) {
730 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
731 } else {
732 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
733 }
734 }
735 tcg_temp_free_i32(t0);
736 }
737 }
738
739 static void dec_bit(DisasContext *dc)
740 {
741 CPUState *cs = CPU(dc->cpu);
742 TCGv_i32 t0;
743 unsigned int op;
744
745 op = dc->ir & ((1 << 9) - 1);
746 switch (op) {
747 case 0x21:
748 /* src. */
749 t0 = tcg_temp_new_i32();
750
751 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
752 tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
753 tcg_gen_andi_i32(t0, t0, MSR_CC);
754 write_carry(dc, cpu_R[dc->ra]);
755 if (dc->rd) {
756 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
757 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
758 }
759 tcg_temp_free_i32(t0);
760 break;
761
762 case 0x1:
763 case 0x41:
764         /* srl (0x41) / sra (0x01). */
765 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
766
767 /* Update carry. Note that write carry only looks at the LSB. */
768 write_carry(dc, cpu_R[dc->ra]);
769 if (dc->rd) {
770 if (op == 0x41)
771 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
772 else
773 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
774 }
775 break;
776 case 0x60:
777 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
778 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
779 break;
780 case 0x61:
781 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
782 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
783 break;
784 case 0x64:
785 case 0x66:
786 case 0x74:
787 case 0x76:
788 /* wdc. */
789 LOG_DIS("wdc r%d\n", dc->ra);
790 trap_userspace(dc, true);
791 break;
792 case 0x68:
793 /* wic. */
794 LOG_DIS("wic r%d\n", dc->ra);
795 trap_userspace(dc, true);
796 break;
797 case 0xe0:
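        /* clz - count leading zeros. */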
798 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
799 return;
800 }
801 if (dc->cpu->cfg.use_pcmp_instr) {
802 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
803 }
804 break;
805 case 0x1e0:
806 /* swapb */
807 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
808 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
809 break;
810 case 0x1e2:
811         /* swaph */
812 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
813 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
814 break;
815 default:
816 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
817 dc->pc, op, dc->rd, dc->ra, dc->rb);
818 break;
819 }
820 }
821
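/* Commit a pending direct branch into env_btaken/env_btarget and treat it as
   indirect from then on. */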
822 static inline void sync_jmpstate(DisasContext *dc)
823 {
824 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
825 if (dc->jmp == JMP_DIRECT) {
826 tcg_gen_movi_i32(env_btaken, 1);
827 }
828 dc->jmp = JMP_INDIRECT;
829 tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
830 }
831 }
832
833 static void dec_imm(DisasContext *dc)
834 {
835 LOG_DIS("imm %x\n", dc->imm << 16);
836 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
837 dc->tb_flags |= IMM_FLAG;
838 dc->clear_imm = 0;
839 }
840
841 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
842 {
843 bool extimm = dc->tb_flags & IMM_FLAG;
844     /* Should be set to true if r1 is used by load/stores.  */
845 bool stackprot = false;
846 TCGv_i32 t32;
847
848 /* All load/stores use ra. */
849 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
850 stackprot = true;
851 }
852
853 /* Treat the common cases first. */
854 if (!dc->type_b) {
855 if (ea) {
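            /* Extended addressing: rb supplies the low 32 address bits and ra
               the high bits (when addr_size > 32). */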
856 int addr_size = dc->cpu->cfg.addr_size;
857
858 if (addr_size == 32) {
859 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
860 return;
861 }
862
863 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
864 if (addr_size < 64) {
865 /* Mask off out of range bits. */
866 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
867 }
868 return;
869 }
870
871 /* If any of the regs is r0, set t to the value of the other reg. */
872 if (dc->ra == 0) {
873 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
874 return;
875 } else if (dc->rb == 0) {
876 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
877 return;
878 }
879
880 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
881 stackprot = true;
882 }
883
884 t32 = tcg_temp_new_i32();
885 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
886 tcg_gen_extu_i32_tl(t, t32);
887 tcg_temp_free_i32(t32);
888
889 if (stackprot) {
890 gen_helper_stackprot(cpu_env, t);
891 }
892 return;
893 }
894 /* Immediate. */
895 t32 = tcg_temp_new_i32();
896 if (!extimm) {
897 tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
898 } else {
899 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
900 }
901 tcg_gen_extu_i32_tl(t, t32);
902 tcg_temp_free_i32(t32);
903
904 if (stackprot) {
905 gen_helper_stackprot(cpu_env, t);
906 }
907 return;
908 }
909
910 static void dec_load(DisasContext *dc)
911 {
912 TCGv_i32 v;
913 TCGv addr;
914 unsigned int size;
915 bool rev = false, ex = false, ea = false;
916 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
917 MemOp mop;
918
919 mop = dc->opcode & 3;
920 size = 1 << mop;
921 if (!dc->type_b) {
922 ea = extract32(dc->ir, 7, 1);
923 rev = extract32(dc->ir, 9, 1);
924 ex = extract32(dc->ir, 10, 1);
925 }
926 mop |= MO_TE;
927 if (rev) {
928 mop ^= MO_BSWAP;
929 }
930
931 if (trap_illegal(dc, size > 4)) {
932 return;
933 }
934
935 if (trap_userspace(dc, ea)) {
936 return;
937 }
938
939 LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
940 ex ? "x" : "",
941 ea ? "ea" : "");
942
943 t_sync_flags(dc);
944 addr = tcg_temp_new();
945 compute_ldst_addr(dc, ea, addr);
946 /* Extended addressing bypasses the MMU. */
947 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
948
949 /*
950 * When doing reverse accesses we need to do two things.
951 *
952 * 1. Reverse the address wrt endianness.
953 * 2. Byteswap the data lanes on the way back into the CPU core.
954 */
955 if (rev && size != 4) {
956         /* Endian reverse the address. */
957 switch (size) {
958 case 1:
959 {
960 tcg_gen_xori_tl(addr, addr, 3);
961 break;
962 }
963
964 case 2:
965 /* 00 -> 10
966 10 -> 00. */
967 tcg_gen_xori_tl(addr, addr, 2);
968 break;
969 default:
970 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
971 break;
972 }
973 }
974
975 /* lwx does not throw unaligned access errors, so force alignment */
976 if (ex) {
977 tcg_gen_andi_tl(addr, addr, ~3);
978 }
979
980 /* If we get a fault on a dslot, the jmpstate better be in sync. */
981 sync_jmpstate(dc);
982
983 /* Verify alignment if needed. */
984 /*
985 * Microblaze gives MMU faults priority over faults due to
986 * unaligned addresses. That's why we speculatively do the load
987 * into v. If the load succeeds, we verify alignment of the
988 * address and if that succeeds we write into the destination reg.
989 */
990 v = tcg_temp_new_i32();
991 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
992
993 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
994 TCGv_i32 t0 = tcg_const_i32(0);
995 TCGv_i32 treg = tcg_const_i32(dc->rd);
996 TCGv_i32 tsize = tcg_const_i32(size - 1);
997
998 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
999 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1000
1001 tcg_temp_free_i32(t0);
1002 tcg_temp_free_i32(treg);
1003 tcg_temp_free_i32(tsize);
1004 }
1005
1006 if (ex) {
1007 tcg_gen_mov_tl(env_res_addr, addr);
1008 tcg_gen_mov_i32(env_res_val, v);
1009 }
1010 if (dc->rd) {
1011 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1012 }
1013 tcg_temp_free_i32(v);
1014
1015 if (ex) { /* lwx */
1016 /* no support for AXI exclusive so always clear C */
1017 write_carryi(dc, 0);
1018 }
1019
1020 tcg_temp_free(addr);
1021 }
1022
1023 static void dec_store(DisasContext *dc)
1024 {
1025 TCGv addr;
1026 TCGLabel *swx_skip = NULL;
1027 unsigned int size;
1028 bool rev = false, ex = false, ea = false;
1029 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1030 MemOp mop;
1031
1032 mop = dc->opcode & 3;
1033 size = 1 << mop;
1034 if (!dc->type_b) {
1035 ea = extract32(dc->ir, 7, 1);
1036 rev = extract32(dc->ir, 9, 1);
1037 ex = extract32(dc->ir, 10, 1);
1038 }
1039 mop |= MO_TE;
1040 if (rev) {
1041 mop ^= MO_BSWAP;
1042 }
1043
1044 if (trap_illegal(dc, size > 4)) {
1045 return;
1046 }
1047
1048 trap_userspace(dc, ea);
1049
1050 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1051 ex ? "x" : "",
1052 ea ? "ea" : "");
1053 t_sync_flags(dc);
1054 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1055 sync_jmpstate(dc);
1056 /* SWX needs a temp_local. */
1057 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1058 compute_ldst_addr(dc, ea, addr);
1059 /* Extended addressing bypasses the MMU. */
1060 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1061
1062 if (ex) { /* swx */
1063 TCGv_i32 tval;
1064
1065 /* swx does not throw unaligned access errors, so force alignment */
1066 tcg_gen_andi_tl(addr, addr, ~3);
1067
1068 write_carryi(dc, 1);
1069 swx_skip = gen_new_label();
1070 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1071
1072 /*
1073 * Compare the value loaded at lwx with current contents of
1074 * the reserved location.
1075 */
1076 tval = tcg_temp_new_i32();
1077
1078 tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
1079 cpu_R[dc->rd], mem_index,
1080 mop);
1081
1082 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1083 write_carryi(dc, 0);
1084 tcg_temp_free_i32(tval);
1085 }
1086
1087 if (rev && size != 4) {
1088         /* Endian reverse the address. */
1089 switch (size) {
1090 case 1:
1091 {
1092 tcg_gen_xori_tl(addr, addr, 3);
1093 break;
1094 }
1095
1096 case 2:
1097 /* 00 -> 10
1098 10 -> 00. */
1099 /* Force addr into the temp. */
1100 tcg_gen_xori_tl(addr, addr, 2);
1101 break;
1102 default:
1103 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1104 break;
1105 }
1106 }
1107
1108 if (!ex) {
1109 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1110 }
1111
1112 /* Verify alignment if needed. */
1113 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1114 TCGv_i32 t1 = tcg_const_i32(1);
1115 TCGv_i32 treg = tcg_const_i32(dc->rd);
1116 TCGv_i32 tsize = tcg_const_i32(size - 1);
1117
1118 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
1119 /* FIXME: if the alignment is wrong, we should restore the value
1120 * in memory. One possible way to achieve this is to probe
1121          * the MMU prior to the memory access, that way we could put
1122 * the alignment checks in between the probe and the mem
1123 * access.
1124 */
1125 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1126
1127 tcg_temp_free_i32(t1);
1128 tcg_temp_free_i32(treg);
1129 tcg_temp_free_i32(tsize);
1130 }
1131
1132 if (ex) {
1133 gen_set_label(swx_skip);
1134 }
1135
1136 tcg_temp_free(addr);
1137 }
1138
1139 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1140 TCGv_i32 d, TCGv_i32 a)
1141 {
1142 static const int mb_to_tcg_cc[] = {
1143 [CC_EQ] = TCG_COND_EQ,
1144 [CC_NE] = TCG_COND_NE,
1145 [CC_LT] = TCG_COND_LT,
1146 [CC_LE] = TCG_COND_LE,
1147 [CC_GE] = TCG_COND_GE,
1148 [CC_GT] = TCG_COND_GT,
1149 };
1150
1151 switch (cc) {
1152 case CC_EQ:
1153 case CC_NE:
1154 case CC_LT:
1155 case CC_LE:
1156 case CC_GE:
1157 case CC_GT:
1158 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1159 break;
1160 default:
1161 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1162 break;
1163 }
1164 }
1165
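/* Resolve a branch: write pc_true into SR_PC if env_btaken is non-zero, otherwise pc_false. */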
1166 static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
1167 {
1168 TCGv_i64 tmp_btaken = tcg_temp_new_i64();
1169 TCGv_i64 tmp_zero = tcg_const_i64(0);
1170
1171 tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
1172 tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
1173 tmp_btaken, tmp_zero,
1174 pc_true, pc_false);
1175
1176 tcg_temp_free_i64(tmp_btaken);
1177 tcg_temp_free_i64(tmp_zero);
1178 }
1179
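/* Enter a branch delay slot: flag it in tb_flags and record in bimm whether
   the branch insn is immediate-prefixed. */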
1180 static void dec_setup_dslot(DisasContext *dc)
1181 {
1182 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1183
1184 dc->delayed_branch = 2;
1185 dc->tb_flags |= D_FLAG;
1186
1187 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1188 tcg_temp_free_i32(tmp);
1189 }
1190
1191 static void dec_bcc(DisasContext *dc)
1192 {
1193 unsigned int cc;
1194 unsigned int dslot;
1195
1196 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1197 dslot = dc->ir & (1 << 25);
1198 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1199
1200 dc->delayed_branch = 1;
1201 if (dslot) {
1202 dec_setup_dslot(dc);
1203 }
1204
1205 if (dec_alu_op_b_is_small_imm(dc)) {
1206 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1207
1208 tcg_gen_movi_i64(env_btarget, dc->pc + offset);
1209 dc->jmp = JMP_DIRECT_CC;
1210 dc->jmp_pc = dc->pc + offset;
1211 } else {
1212 dc->jmp = JMP_INDIRECT;
1213 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1214 tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1215 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1216 }
1217 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
1218 }
1219
1220 static void dec_br(DisasContext *dc)
1221 {
1222 unsigned int dslot, link, abs, mbar;
1223
1224 dslot = dc->ir & (1 << 20);
1225 abs = dc->ir & (1 << 19);
1226 link = dc->ir & (1 << 18);
1227
1228 /* Memory barrier. */
1229 mbar = (dc->ir >> 16) & 31;
1230 if (mbar == 2 && dc->imm == 4) {
1231 uint16_t mbar_imm = dc->rd;
1232
1233 LOG_DIS("mbar %d\n", mbar_imm);
1234
1235 /* Data access memory barrier. */
1236 if ((mbar_imm & 2) == 0) {
1237 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1238 }
1239
1240 /* mbar IMM & 16 decodes to sleep. */
1241 if (mbar_imm & 16) {
1242 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1243 TCGv_i32 tmp_1 = tcg_const_i32(1);
1244
1245 LOG_DIS("sleep\n");
1246
1247 if (trap_userspace(dc, true)) {
1248 /* Sleep is a privileged instruction. */
1249 return;
1250 }
1251
1252 t_sync_flags(dc);
1253 tcg_gen_st_i32(tmp_1, cpu_env,
1254 -offsetof(MicroBlazeCPU, env)
1255 +offsetof(CPUState, halted));
1256 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
1257 gen_helper_raise_exception(cpu_env, tmp_hlt);
1258 tcg_temp_free_i32(tmp_hlt);
1259 tcg_temp_free_i32(tmp_1);
1260 return;
1261 }
1262 /* Break the TB. */
1263 dc->cpustate_changed = 1;
1264 return;
1265 }
1266
1267 LOG_DIS("br%s%s%s%s imm=%x\n",
1268 abs ? "a" : "", link ? "l" : "",
1269 dc->type_b ? "i" : "", dslot ? "d" : "",
1270 dc->imm);
1271
1272 dc->delayed_branch = 1;
1273 if (dslot) {
1274 dec_setup_dslot(dc);
1275 }
1276 if (link && dc->rd)
1277 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1278
1279 dc->jmp = JMP_INDIRECT;
1280 if (abs) {
1281 tcg_gen_movi_i32(env_btaken, 1);
1282 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1283 if (link && !dslot) {
1284 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1285 t_gen_raise_exception(dc, EXCP_BREAK);
1286 if (dc->imm == 0) {
1287 if (trap_userspace(dc, true)) {
1288 return;
1289 }
1290
1291 t_gen_raise_exception(dc, EXCP_DEBUG);
1292 }
1293 }
1294 } else {
1295 if (dec_alu_op_b_is_small_imm(dc)) {
1296 dc->jmp = JMP_DIRECT;
1297 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1298 } else {
1299 tcg_gen_movi_i32(env_btaken, 1);
1300 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1301 tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1302 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1303 }
1304 }
1305 }
1306
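/* rti: restore UM/VM from the saved UMS/VMS bits and set IE. */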
1307 static inline void do_rti(DisasContext *dc)
1308 {
1309 TCGv_i32 t0, t1;
1310 t0 = tcg_temp_new_i32();
1311 t1 = tcg_temp_new_i32();
1312 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1313 tcg_gen_shri_i32(t0, t1, 1);
1314 tcg_gen_ori_i32(t1, t1, MSR_IE);
1315 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1316
1317 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1318 tcg_gen_or_i32(t1, t1, t0);
1319 msr_write(dc, t1);
1320 tcg_temp_free_i32(t1);
1321 tcg_temp_free_i32(t0);
1322 dc->tb_flags &= ~DRTI_FLAG;
1323 }
1324
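/* rtb: restore UM/VM from UMS/VMS and clear the break-in-progress flag (BIP). */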
1325 static inline void do_rtb(DisasContext *dc)
1326 {
1327 TCGv_i32 t0, t1;
1328 t0 = tcg_temp_new_i32();
1329 t1 = tcg_temp_new_i32();
1330 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1331 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1332 tcg_gen_shri_i32(t0, t1, 1);
1333 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1334
1335 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1336 tcg_gen_or_i32(t1, t1, t0);
1337 msr_write(dc, t1);
1338 tcg_temp_free_i32(t1);
1339 tcg_temp_free_i32(t0);
1340 dc->tb_flags &= ~DRTB_FLAG;
1341 }
1342
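/* rte: restore UM/VM from UMS/VMS, set EE and clear the exception-in-progress flag (EIP). */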
1343 static inline void do_rte(DisasContext *dc)
1344 {
1345 TCGv_i32 t0, t1;
1346 t0 = tcg_temp_new_i32();
1347 t1 = tcg_temp_new_i32();
1348
1349 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1350 tcg_gen_ori_i32(t1, t1, MSR_EE);
1351 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1352 tcg_gen_shri_i32(t0, t1, 1);
1353 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1354
1355 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1356 tcg_gen_or_i32(t1, t1, t0);
1357 msr_write(dc, t1);
1358 tcg_temp_free_i32(t1);
1359 tcg_temp_free_i32(t0);
1360 dc->tb_flags &= ~DRTE_FLAG;
1361 }
1362
1363 static void dec_rts(DisasContext *dc)
1364 {
1365 unsigned int b_bit, i_bit, e_bit;
1366 TCGv_i64 tmp64;
1367
1368 i_bit = dc->ir & (1 << 21);
1369 b_bit = dc->ir & (1 << 22);
1370 e_bit = dc->ir & (1 << 23);
1371
1372 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1373 return;
1374 }
1375
1376 dec_setup_dslot(dc);
1377
1378 if (i_bit) {
1379 LOG_DIS("rtid ir=%x\n", dc->ir);
1380 dc->tb_flags |= DRTI_FLAG;
1381 } else if (b_bit) {
1382 LOG_DIS("rtbd ir=%x\n", dc->ir);
1383 dc->tb_flags |= DRTB_FLAG;
1384 } else if (e_bit) {
1385 LOG_DIS("rted ir=%x\n", dc->ir);
1386 dc->tb_flags |= DRTE_FLAG;
1387 } else
1388 LOG_DIS("rts ir=%x\n", dc->ir);
1389
1390 dc->jmp = JMP_INDIRECT;
1391 tcg_gen_movi_i32(env_btaken, 1);
1392
1393 tmp64 = tcg_temp_new_i64();
1394 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1395 tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
1396 tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
1397 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1398 tcg_temp_free_i64(tmp64);
1399 }
1400
1401 static int dec_check_fpuv2(DisasContext *dc)
1402 {
1403 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1404 tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
1405 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1406 }
1407 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1408 }
1409
1410 static void dec_fpu(DisasContext *dc)
1411 {
1412 unsigned int fpu_insn;
1413
1414 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1415 return;
1416 }
1417
1418 fpu_insn = (dc->ir >> 7) & 7;
1419
1420 switch (fpu_insn) {
1421 case 0:
1422 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1423 cpu_R[dc->rb]);
1424 break;
1425
1426 case 1:
1427 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1428 cpu_R[dc->rb]);
1429 break;
1430
1431 case 2:
1432 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1433 cpu_R[dc->rb]);
1434 break;
1435
1436 case 3:
1437 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1438 cpu_R[dc->rb]);
1439 break;
1440
1441 case 4:
1442 switch ((dc->ir >> 4) & 7) {
1443 case 0:
1444 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1445 cpu_R[dc->ra], cpu_R[dc->rb]);
1446 break;
1447 case 1:
1448 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1449 cpu_R[dc->ra], cpu_R[dc->rb]);
1450 break;
1451 case 2:
1452 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1453 cpu_R[dc->ra], cpu_R[dc->rb]);
1454 break;
1455 case 3:
1456 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1457 cpu_R[dc->ra], cpu_R[dc->rb]);
1458 break;
1459 case 4:
1460 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1461 cpu_R[dc->ra], cpu_R[dc->rb]);
1462 break;
1463 case 5:
1464 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1465 cpu_R[dc->ra], cpu_R[dc->rb]);
1466 break;
1467 case 6:
1468 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1469 cpu_R[dc->ra], cpu_R[dc->rb]);
1470 break;
1471 default:
1472 qemu_log_mask(LOG_UNIMP,
1473 "unimplemented fcmp fpu_insn=%x pc=%x"
1474 " opc=%x\n",
1475 fpu_insn, dc->pc, dc->opcode);
1476 dc->abort_at_next_insn = 1;
1477 break;
1478 }
1479 break;
1480
1481 case 5:
1482 if (!dec_check_fpuv2(dc)) {
1483 return;
1484 }
1485 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1486 break;
1487
1488 case 6:
1489 if (!dec_check_fpuv2(dc)) {
1490 return;
1491 }
1492 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1493 break;
1494
1495 case 7:
1496 if (!dec_check_fpuv2(dc)) {
1497 return;
1498 }
1499 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1500 break;
1501
1502 default:
1503 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1504 " opc=%x\n",
1505 fpu_insn, dc->pc, dc->opcode);
1506 dc->abort_at_next_insn = 1;
1507 break;
1508 }
1509 }
1510
1511 static void dec_null(DisasContext *dc)
1512 {
1513 if (trap_illegal(dc, true)) {
1514 return;
1515 }
1516 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1517 dc->abort_at_next_insn = 1;
1518 }
1519
1520 /* Insns connected to FSL or AXI stream attached devices. */
1521 static void dec_stream(DisasContext *dc)
1522 {
1523 TCGv_i32 t_id, t_ctrl;
1524 int ctrl;
1525
1526 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1527 dc->type_b ? "" : "d", dc->imm);
1528
1529 if (trap_userspace(dc, true)) {
1530 return;
1531 }
1532
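    /* The low 4 bits select the stream (FSL) port id; the upper immediate bits
       form the control word passed to the get/put helpers. */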
1533 t_id = tcg_temp_new_i32();
1534 if (dc->type_b) {
1535 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1536 ctrl = dc->imm >> 10;
1537 } else {
1538 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1539 ctrl = dc->imm >> 5;
1540 }
1541
1542 t_ctrl = tcg_const_i32(ctrl);
1543
1544 if (dc->rd == 0) {
1545 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1546 } else {
1547 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1548 }
1549 tcg_temp_free_i32(t_id);
1550 tcg_temp_free_i32(t_ctrl);
1551 }
1552
1553 static struct decoder_info {
1554 struct {
1555 uint32_t bits;
1556 uint32_t mask;
1557 };
1558 void (*dec)(DisasContext *dc);
1559 } decinfo[] = {
1560 {DEC_ADD, dec_add},
1561 {DEC_SUB, dec_sub},
1562 {DEC_AND, dec_and},
1563 {DEC_XOR, dec_xor},
1564 {DEC_OR, dec_or},
1565 {DEC_BIT, dec_bit},
1566 {DEC_BARREL, dec_barrel},
1567 {DEC_LD, dec_load},
1568 {DEC_ST, dec_store},
1569 {DEC_IMM, dec_imm},
1570 {DEC_BR, dec_br},
1571 {DEC_BCC, dec_bcc},
1572 {DEC_RTS, dec_rts},
1573 {DEC_FPU, dec_fpu},
1574 {DEC_MUL, dec_mul},
1575 {DEC_DIV, dec_div},
1576 {DEC_MSR, dec_msr},
1577 {DEC_STREAM, dec_stream},
1578 {{0, 0}, dec_null}
1579 };
1580
1581 static inline void decode(DisasContext *dc, uint32_t ir)
1582 {
1583 int i;
1584
1585 dc->ir = ir;
1586 LOG_DIS("%8.8x\t", dc->ir);
1587
1588 if (ir == 0) {
1589 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
1590 /* Don't decode nop/zero instructions any further. */
1591 return;
1592 }
1593
1594     /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) selects type B insns.  */
1595 dc->type_b = ir & (1 << 29);
1596
1597 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1598 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1599 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1600 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1601 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1602
1603 /* Large switch for all insns. */
1604 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1605 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1606 decinfo[i].dec(dc);
1607 break;
1608 }
1609 }
1610 }
1611
1612 /* generate intermediate code for basic block 'tb'. */
1613 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1614 {
1615 CPUMBState *env = cs->env_ptr;
1616 MicroBlazeCPU *cpu = env_archcpu(env);
1617 uint32_t pc_start;
1618 struct DisasContext ctx;
1619 struct DisasContext *dc = &ctx;
1620 uint32_t page_start, org_flags;
1621 uint32_t npc;
1622 int num_insns;
1623
1624 pc_start = tb->pc;
1625 dc->cpu = cpu;
1626 dc->tb = tb;
1627 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1628
1629 dc->is_jmp = DISAS_NEXT;
1630 dc->jmp = 0;
1631 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1632 if (dc->delayed_branch) {
1633 dc->jmp = JMP_INDIRECT;
1634 }
1635 dc->pc = pc_start;
1636 dc->singlestep_enabled = cs->singlestep_enabled;
1637 dc->cpustate_changed = 0;
1638 dc->abort_at_next_insn = 0;
1639
1640 if (pc_start & 3) {
1641 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1642 }
1643
1644 page_start = pc_start & TARGET_PAGE_MASK;
1645 num_insns = 0;
1646
1647 gen_tb_start(tb);
1648 do
1649 {
1650 tcg_gen_insn_start(dc->pc);
1651 num_insns++;
1652
1653 #if SIM_COMPAT
1654 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1655 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
1656 gen_helper_debug();
1657 }
1658 #endif
1659
1660 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1661 t_gen_raise_exception(dc, EXCP_DEBUG);
1662 dc->is_jmp = DISAS_UPDATE;
1663 /* The address covered by the breakpoint must be included in
1664            [tb->pc, tb->pc + tb->size) in order for it to be
1665 properly cleared -- thus we increment the PC here so that
1666 the logic setting tb->size below does the right thing. */
1667 dc->pc += 4;
1668 break;
1669 }
1670
1671 /* Pretty disas. */
1672 LOG_DIS("%8.8x:\t", dc->pc);
1673
1674 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1675 gen_io_start();
1676 }
1677
1678 dc->clear_imm = 1;
1679 decode(dc, cpu_ldl_code(env, dc->pc));
1680 if (dc->clear_imm)
1681 dc->tb_flags &= ~IMM_FLAG;
1682 dc->pc += 4;
1683
1684 if (dc->delayed_branch) {
1685 dc->delayed_branch--;
1686 if (!dc->delayed_branch) {
1687 if (dc->tb_flags & DRTI_FLAG)
1688 do_rti(dc);
1689 if (dc->tb_flags & DRTB_FLAG)
1690 do_rtb(dc);
1691 if (dc->tb_flags & DRTE_FLAG)
1692 do_rte(dc);
1693 /* Clear the delay slot flag. */
1694 dc->tb_flags &= ~D_FLAG;
1695 /* If it is a direct jump, try direct chaining. */
1696 if (dc->jmp == JMP_INDIRECT) {
1697 TCGv_i64 tmp_pc = tcg_const_i64(dc->pc);
1698 eval_cond_jmp(dc, env_btarget, tmp_pc);
1699 tcg_temp_free_i64(tmp_pc);
1700
1701 dc->is_jmp = DISAS_JUMP;
1702 } else if (dc->jmp == JMP_DIRECT) {
1703 t_sync_flags(dc);
1704 gen_goto_tb(dc, 0, dc->jmp_pc);
1705 dc->is_jmp = DISAS_TB_JUMP;
1706 } else if (dc->jmp == JMP_DIRECT_CC) {
1707 TCGLabel *l1 = gen_new_label();
1708 t_sync_flags(dc);
1709 /* Conditional jmp. */
1710 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
1711 gen_goto_tb(dc, 1, dc->pc);
1712 gen_set_label(l1);
1713 gen_goto_tb(dc, 0, dc->jmp_pc);
1714
1715 dc->is_jmp = DISAS_TB_JUMP;
1716 }
1717 break;
1718 }
1719 }
1720 if (cs->singlestep_enabled) {
1721 break;
1722 }
1723 } while (!dc->is_jmp && !dc->cpustate_changed
1724 && !tcg_op_buf_full()
1725 && !singlestep
1726 && (dc->pc - page_start < TARGET_PAGE_SIZE)
1727 && num_insns < max_insns);
1728
1729 npc = dc->pc;
1730 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1731 if (dc->tb_flags & D_FLAG) {
1732 dc->is_jmp = DISAS_UPDATE;
1733 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
1734 sync_jmpstate(dc);
1735 } else
1736 npc = dc->jmp_pc;
1737 }
1738
1739 /* Force an update if the per-tb cpu state has changed. */
1740 if (dc->is_jmp == DISAS_NEXT
1741 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1742 dc->is_jmp = DISAS_UPDATE;
1743 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
1744 }
1745 t_sync_flags(dc);
1746
1747 if (unlikely(cs->singlestep_enabled)) {
1748 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1749
1750 if (dc->is_jmp != DISAS_JUMP) {
1751 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
1752 }
1753 gen_helper_raise_exception(cpu_env, tmp);
1754 tcg_temp_free_i32(tmp);
1755 } else {
1756 switch(dc->is_jmp) {
1757 case DISAS_NEXT:
1758 gen_goto_tb(dc, 1, npc);
1759 break;
1760 default:
1761 case DISAS_JUMP:
1762 case DISAS_UPDATE:
1763 /* indicate that the hash table must be used
1764 to find the next TB */
1765 tcg_gen_exit_tb(NULL, 0);
1766 break;
1767 case DISAS_TB_JUMP:
1768 /* nothing more to generate */
1769 break;
1770 }
1771 }
1772 gen_tb_end(tb, num_insns);
1773
1774 tb->size = dc->pc - pc_start;
1775 tb->icount = num_insns;
1776
1777 #ifdef DEBUG_DISAS
1778 #if !SIM_COMPAT
1779 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1780 && qemu_log_in_addr_range(pc_start)) {
1781 FILE *logfile = qemu_log_lock();
1782 qemu_log("--------------\n");
1783 log_target_disas(cs, pc_start, dc->pc - pc_start);
1784 qemu_log_unlock(logfile);
1785 }
1786 #endif
1787 #endif
1788 assert(!dc->abort_at_next_insn);
1789 }
1790
1791 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1792 {
1793 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1794 CPUMBState *env = &cpu->env;
1795 int i;
1796
1797 if (!env) {
1798 return;
1799 }
1800
1801 qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
1802 env->pc, lookup_symbol(env->pc));
1803 qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
1804 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 " "
1805 "rbtr=%" PRIx64 "\n",
1806 env->msr, env->esr, env->ear,
1807 env->debug, env->imm, env->iflags, env->fsr,
1808 env->btr);
1809 qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
1810 "eip=%d ie=%d\n",
1811 env->btaken, env->btarget,
1812 (env->msr & MSR_UM) ? "user" : "kernel",
1813 (env->msr & MSR_UMS) ? "user" : "kernel",
1814 (bool)(env->msr & MSR_EIP),
1815 (bool)(env->msr & MSR_IE));
1816 for (i = 0; i < 12; i++) {
1817 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1818 if ((i + 1) % 4 == 0) {
1819 qemu_fprintf(f, "\n");
1820 }
1821 }
1822
1823 /* Registers that aren't modeled are reported as 0 */
1824 qemu_fprintf(f, "redr=%" PRIx64 " rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1825 "rtlblo=0 rtlbhi=0\n", env->edr);
1826 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1827 for (i = 0; i < 32; i++) {
1828 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1829 if ((i + 1) % 4 == 0)
1830 qemu_fprintf(f, "\n");
1831 }
1832 qemu_fprintf(f, "\n\n");
1833 }
1834
1835 void mb_tcg_init(void)
1836 {
1837 int i;
1838
1839 env_debug = tcg_global_mem_new_i32(cpu_env,
1840 offsetof(CPUMBState, debug),
1841 "debug0");
1842 env_iflags = tcg_global_mem_new_i32(cpu_env,
1843 offsetof(CPUMBState, iflags),
1844 "iflags");
1845 env_imm = tcg_global_mem_new_i32(cpu_env,
1846 offsetof(CPUMBState, imm),
1847 "imm");
1848 env_btarget = tcg_global_mem_new_i64(cpu_env,
1849 offsetof(CPUMBState, btarget),
1850 "btarget");
1851 env_btaken = tcg_global_mem_new_i32(cpu_env,
1852 offsetof(CPUMBState, btaken),
1853 "btaken");
1854 env_res_addr = tcg_global_mem_new(cpu_env,
1855 offsetof(CPUMBState, res_addr),
1856 "res_addr");
1857 env_res_val = tcg_global_mem_new_i32(cpu_env,
1858 offsetof(CPUMBState, res_val),
1859 "res_val");
1860 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1861 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
1862 offsetof(CPUMBState, regs[i]),
1863 regnames[i]);
1864 }
1865
1866 cpu_SR[SR_PC] =
1867 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, pc), "rpc");
1868 cpu_SR[SR_MSR] =
1869 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, msr), "rmsr");
1870 cpu_SR[SR_EAR] =
1871 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, ear), "rear");
1872 cpu_SR[SR_ESR] =
1873 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, esr), "resr");
1874 cpu_SR[SR_FSR] =
1875 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, fsr), "rfsr");
1876 cpu_SR[SR_BTR] =
1877 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, btr), "rbtr");
1878 cpu_SR[SR_EDR] =
1879 tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, edr), "redr");
1880 }
1881
1882 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1883 target_ulong *data)
1884 {
1885 env->pc = data[0];
1886 }