cpu: Move breakpoints field from CPU_COMMON to CPUState
[qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 //#define SH4_SINGLE_STEP
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26
27 #include "helper.h"
28 #define GEN_HELPER 1
29 #include "helper.h"
30
/* Per-translation-block decoder state, threaded through every gen_* helper. */
typedef struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    target_ulong pc;               /* guest address of the current opcode */
    uint16_t opcode;               /* raw 16-bit SH-4 instruction word */
    uint32_t flags;                /* translation-time copy of env flags
                                      (SR_MD/SR_RB/FPSCR bits, DELAY_SLOT...) */
    int bstate;                    /* BS_* code: why/whether translation stops */
    int memidx;                    /* MMU index passed to qemu_ld/st ops */
    uint32_t delayed_pc;           /* static branch target, or (uint32_t)-1
                                      when the target is only known at run time */
    int singlestep_enabled;        /* nonzero: raise debug excp on TB exits */
    uint32_t features;             /* CPU feature bits — presumably SH_FEATURE_*
                                      from cpu.h; confirm against users below */
    int has_movcal;                /* a movca.l backup may be live; see the
                                      ocbi handling in _decode_opc */
} DisasContext;
43
#if defined(CONFIG_USER_ONLY)
/* Linux-user emulation always runs in user mode. */
#define IS_USER(ctx) 1
#else
/* User mode iff the cached SR.MD (privileged-mode) bit is clear. */
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif

/* Values for DisasContext::bstate — how translation of the TB ended. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition
                  */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
58
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];      /* R0-R15 plus the second bank of R0-R7 */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];      /* two banks of 16 single-precision FP regs */

/* internal register indexes (translator-private CPUSH4State fields) */
static TCGv cpu_flags, cpu_delayed_pc;

/* Per-op copy of the translation flags — presumably consumed when
   restoring CPU state from a TB; confirm against gen_intermediate_code. */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "exec/gen-icount.h"
73
/* Create the TCG globals mirroring CPUSH4State fields.  Must be called
 * before any translation; idempotent (guarded by done_init), so repeated
 * CPU creation is harmless.
 */
void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    /* 16 architectural registers followed by the shadow bank of R0-R7. */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    /* Two banks of 16 single-precision registers; FPSCR.FR selects the
       active bank (see FREG/XREG below). */
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    /* Control/system registers, each backed by its CPUSH4State field. */
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Translator-internal state exposed to generated code. */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    done_init = 1;
}
148
149 void superh_cpu_dump_state(CPUState *cs, FILE *f,
150 fprintf_function cpu_fprintf, int flags)
151 {
152 SuperHCPU *cpu = SUPERH_CPU(cs);
153 CPUSH4State *env = &cpu->env;
154 int i;
155 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
156 env->pc, env->sr, env->pr, env->fpscr);
157 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
158 env->spc, env->ssr, env->gbr, env->vbr);
159 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
160 env->sgr, env->dbr, env->delayed_pc, env->fpul);
161 for (i = 0; i < 24; i += 4) {
162 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
163 i, env->gregs[i], i + 1, env->gregs[i + 1],
164 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
165 }
166 if (env->flags & DELAY_SLOT) {
167 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
168 env->delayed_pc);
169 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
170 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
171 env->delayed_pc);
172 }
173 }
174
/* Emit code to transfer control to a statically-known destination.
 * If DEST lies in the same guest page as this TB and we are not
 * single-stepping, chain to the next TB directly (goto_tb slot N);
 * otherwise set PC and leave the translated-code loop, raising a debug
 * exception first when single-stepping.
 */
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        !ctx->singlestep_enabled) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        /* Encode the TB pointer plus the jump-slot index for chaining. */
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    }
}
193
/* Emit the jump recorded for the end of a delay-slot pair.  A
 * delayed_pc of (uint32_t)-1 means the target is only known at run time
 * (rts/jmp/rte), so PC is loaded from cpu_delayed_pc and the TB is
 * exited; otherwise a static goto_tb chain is used.
 */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
        /* Target is not statically known, it comes necessarily from a
           delayed jump as immediate jump are conditinal jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
207
208 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
209 {
210 TCGv sr;
211 int label = gen_new_label();
212 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
213 sr = tcg_temp_new();
214 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
215 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
216 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
217 gen_set_label(label);
218 }
219
220 /* Immediate conditional jump (bt or bf) */
221 static void gen_conditional_jump(DisasContext * ctx,
222 target_ulong ift, target_ulong ifnott)
223 {
224 int l1;
225 TCGv sr;
226
227 l1 = gen_new_label();
228 sr = tcg_temp_new();
229 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
230 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
231 gen_goto_tb(ctx, 0, ifnott);
232 gen_set_label(l1);
233 gen_goto_tb(ctx, 1, ift);
234 }
235
236 /* Delayed conditional jump (bt or bf) */
237 static void gen_delayed_conditional_jump(DisasContext * ctx)
238 {
239 int l1;
240 TCGv ds;
241
242 l1 = gen_new_label();
243 ds = tcg_temp_new();
244 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
245 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
246 gen_goto_tb(ctx, 1, ctx->pc + 2);
247 gen_set_label(l1);
248 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
249 gen_jump(ctx);
250 }
251
252 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
253 {
254 TCGv t;
255
256 t = tcg_temp_new();
257 tcg_gen_setcond_i32(cond, t, t1, t0);
258 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
259 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
260
261 tcg_temp_free(t);
262 }
263
264 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
265 {
266 TCGv t;
267
268 t = tcg_temp_new();
269 tcg_gen_setcondi_i32(cond, t, t0, imm);
270 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
271 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
272
273 tcg_temp_free(t);
274 }
275
/* Write the translator's FLAGS back to env->flags (via cpu_flags),
 * preserving only the run-time DELAY_SLOT_TRUE bit set by generated code.
 */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
281
282 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
283 {
284 TCGv tmp = tcg_temp_new();
285
286 p0 &= 0x1f;
287 p1 &= 0x1f;
288
289 tcg_gen_andi_i32(tmp, t1, (1 << p1));
290 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
291 if (p0 < p1)
292 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
293 else if (p0 > p1)
294 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
295 tcg_gen_or_i32(t0, t0, tmp);
296
297 tcg_temp_free(tmp);
298 }
299
/* Assemble a 64-bit value from an FP register pair: cpu_fregs[reg]
 * supplies the high word and cpu_fregs[reg + 1] the low word (the
 * arguments to concat are low, then high).
 */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
304
/* Split a 64-bit value back into an FP register pair: high word into
 * cpu_fregs[reg], low word into cpu_fregs[reg + 1] (inverse of
 * gen_load_fpr64).
 * NOTE(review): clobbers the caller's T (it is shifted right by 32 in
 * place) — callers seen here free T immediately afterwards, but confirm
 * before reusing the temp after this call.
 */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
315
/* Bit-field extraction from the 16-bit instruction in ctx->opcode. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* Sign-extended 8-bit immediate. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* Sign-extended 12-bit branch displacement. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* REG(x): the currently-active general register x.  R0-R7 come from the
   second bank when both SR.MD and SR.RB are set in the cached flags. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* ALTREG(x): the inactive (shadow) bank of register x. */
#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register index remapping: FPSCR.FR selects the active FP bank;
   XHACK interleaves the index for the XD pair view. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* Raise slot-illegal-instruction if this insn sits in a delay slot
   (branches etc. are forbidden there); ends translation of the TB. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
  { \
      tcg_gen_movi_i32(cpu_pc, ctx->pc); \
      gen_helper_raise_slot_illegal_instruction(cpu_env); \
      ctx->bstate = BS_BRANCH; \
      return; \
  }

/* Raise (slot-)illegal-instruction if executing in user mode; ends the TB. */
#define CHECK_PRIVILEGED \
  if (IS_USER(ctx)) { \
      tcg_gen_movi_i32(cpu_pc, ctx->pc); \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction(cpu_env); \
      } else { \
          gen_helper_raise_illegal_instruction(cpu_env); \
      } \
      ctx->bstate = BS_BRANCH; \
      return; \
  }

/* Raise (slot-)FPU-disable if SR.FD is set; ends translation of the TB. */
#define CHECK_FPU_ENABLED \
  if (ctx->flags & SR_FD) { \
      tcg_gen_movi_i32(cpu_pc, ctx->pc); \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env); \
      } else { \
          gen_helper_raise_fpu_disable(cpu_env); \
      } \
      ctx->bstate = BS_BRANCH; \
      return; \
  }
369
370 static void _decode_opc(DisasContext * ctx)
371 {
372 /* This code tries to make movcal emulation sufficiently
373 accurate for Linux purposes. This instruction writes
374 memory, and prior to that, always allocates a cache line.
375 It is used in two contexts:
376 - in memcpy, where data is copied in blocks, the first write
377 of to a block uses movca.l for performance.
378 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
379 to flush the cache. Here, the data written by movcal.l is never
380 written to memory, and the data written is just bogus.
381
382 To simulate this, we simulate movcal.l, we store the value to memory,
383 but we also remember the previous content. If we see ocbi, we check
384 if movcal.l for that address was done previously. If so, the write should
385 not have hit the memory, so we restore the previous content.
386 When we see an instruction that is neither movca.l
387 nor ocbi, the previous content is discarded.
388
389 To optimize, we only try to flush stores when we're at the start of
390 TB, or if we already saw movca.l in this TB and did not flush stores
391 yet. */
392 if (ctx->has_movcal)
393 {
394 int opcode = ctx->opcode & 0xf0ff;
395 if (opcode != 0x0093 /* ocbi */
396 && opcode != 0x00c3 /* movca.l */)
397 {
398 gen_helper_discard_movcal_backup(cpu_env);
399 ctx->has_movcal = 0;
400 }
401 }
402
403 #if 0
404 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
405 #endif
406
407 switch (ctx->opcode) {
408 case 0x0019: /* div0u */
409 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
410 return;
411 case 0x000b: /* rts */
412 CHECK_NOT_DELAY_SLOT
413 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
414 ctx->flags |= DELAY_SLOT;
415 ctx->delayed_pc = (uint32_t) - 1;
416 return;
417 case 0x0028: /* clrmac */
418 tcg_gen_movi_i32(cpu_mach, 0);
419 tcg_gen_movi_i32(cpu_macl, 0);
420 return;
421 case 0x0048: /* clrs */
422 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
423 return;
424 case 0x0008: /* clrt */
425 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
426 return;
427 case 0x0038: /* ldtlb */
428 CHECK_PRIVILEGED
429 gen_helper_ldtlb(cpu_env);
430 return;
431 case 0x002b: /* rte */
432 CHECK_PRIVILEGED
433 CHECK_NOT_DELAY_SLOT
434 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
435 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
436 ctx->flags |= DELAY_SLOT;
437 ctx->delayed_pc = (uint32_t) - 1;
438 return;
439 case 0x0058: /* sets */
440 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
441 return;
442 case 0x0018: /* sett */
443 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
444 return;
445 case 0xfbfd: /* frchg */
446 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
447 ctx->bstate = BS_STOP;
448 return;
449 case 0xf3fd: /* fschg */
450 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
451 ctx->bstate = BS_STOP;
452 return;
453 case 0x0009: /* nop */
454 return;
455 case 0x001b: /* sleep */
456 CHECK_PRIVILEGED
457 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
458 gen_helper_sleep(cpu_env);
459 return;
460 }
461
462 switch (ctx->opcode & 0xf000) {
463 case 0x1000: /* mov.l Rm,@(disp,Rn) */
464 {
465 TCGv addr = tcg_temp_new();
466 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
467 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
468 tcg_temp_free(addr);
469 }
470 return;
471 case 0x5000: /* mov.l @(disp,Rm),Rn */
472 {
473 TCGv addr = tcg_temp_new();
474 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
475 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
476 tcg_temp_free(addr);
477 }
478 return;
479 case 0xe000: /* mov #imm,Rn */
480 tcg_gen_movi_i32(REG(B11_8), B7_0s);
481 return;
482 case 0x9000: /* mov.w @(disp,PC),Rn */
483 {
484 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
485 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
486 tcg_temp_free(addr);
487 }
488 return;
489 case 0xd000: /* mov.l @(disp,PC),Rn */
490 {
491 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
492 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
493 tcg_temp_free(addr);
494 }
495 return;
496 case 0x7000: /* add #imm,Rn */
497 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
498 return;
499 case 0xa000: /* bra disp */
500 CHECK_NOT_DELAY_SLOT
501 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
502 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
503 ctx->flags |= DELAY_SLOT;
504 return;
505 case 0xb000: /* bsr disp */
506 CHECK_NOT_DELAY_SLOT
507 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
508 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
509 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
510 ctx->flags |= DELAY_SLOT;
511 return;
512 }
513
514 switch (ctx->opcode & 0xf00f) {
515 case 0x6003: /* mov Rm,Rn */
516 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
517 return;
518 case 0x2000: /* mov.b Rm,@Rn */
519 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
520 return;
521 case 0x2001: /* mov.w Rm,@Rn */
522 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
523 return;
524 case 0x2002: /* mov.l Rm,@Rn */
525 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
526 return;
527 case 0x6000: /* mov.b @Rm,Rn */
528 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
529 return;
530 case 0x6001: /* mov.w @Rm,Rn */
531 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
532 return;
533 case 0x6002: /* mov.l @Rm,Rn */
534 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
535 return;
536 case 0x2004: /* mov.b Rm,@-Rn */
537 {
538 TCGv addr = tcg_temp_new();
539 tcg_gen_subi_i32(addr, REG(B11_8), 1);
540 /* might cause re-execution */
541 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
542 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
543 tcg_temp_free(addr);
544 }
545 return;
546 case 0x2005: /* mov.w Rm,@-Rn */
547 {
548 TCGv addr = tcg_temp_new();
549 tcg_gen_subi_i32(addr, REG(B11_8), 2);
550 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
551 tcg_gen_mov_i32(REG(B11_8), addr);
552 tcg_temp_free(addr);
553 }
554 return;
555 case 0x2006: /* mov.l Rm,@-Rn */
556 {
557 TCGv addr = tcg_temp_new();
558 tcg_gen_subi_i32(addr, REG(B11_8), 4);
559 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
560 tcg_gen_mov_i32(REG(B11_8), addr);
561 }
562 return;
563 case 0x6004: /* mov.b @Rm+,Rn */
564 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
565 if ( B11_8 != B7_4 )
566 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
567 return;
568 case 0x6005: /* mov.w @Rm+,Rn */
569 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
570 if ( B11_8 != B7_4 )
571 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
572 return;
573 case 0x6006: /* mov.l @Rm+,Rn */
574 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
575 if ( B11_8 != B7_4 )
576 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
577 return;
578 case 0x0004: /* mov.b Rm,@(R0,Rn) */
579 {
580 TCGv addr = tcg_temp_new();
581 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
582 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
583 tcg_temp_free(addr);
584 }
585 return;
586 case 0x0005: /* mov.w Rm,@(R0,Rn) */
587 {
588 TCGv addr = tcg_temp_new();
589 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
590 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
591 tcg_temp_free(addr);
592 }
593 return;
594 case 0x0006: /* mov.l Rm,@(R0,Rn) */
595 {
596 TCGv addr = tcg_temp_new();
597 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
598 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
599 tcg_temp_free(addr);
600 }
601 return;
602 case 0x000c: /* mov.b @(R0,Rm),Rn */
603 {
604 TCGv addr = tcg_temp_new();
605 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
606 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
607 tcg_temp_free(addr);
608 }
609 return;
610 case 0x000d: /* mov.w @(R0,Rm),Rn */
611 {
612 TCGv addr = tcg_temp_new();
613 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
614 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
615 tcg_temp_free(addr);
616 }
617 return;
618 case 0x000e: /* mov.l @(R0,Rm),Rn */
619 {
620 TCGv addr = tcg_temp_new();
621 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
622 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
623 tcg_temp_free(addr);
624 }
625 return;
626 case 0x6008: /* swap.b Rm,Rn */
627 {
628 TCGv high, low;
629 high = tcg_temp_new();
630 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
631 low = tcg_temp_new();
632 tcg_gen_ext16u_i32(low, REG(B7_4));
633 tcg_gen_bswap16_i32(low, low);
634 tcg_gen_or_i32(REG(B11_8), high, low);
635 tcg_temp_free(low);
636 tcg_temp_free(high);
637 }
638 return;
639 case 0x6009: /* swap.w Rm,Rn */
640 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
641 return;
642 case 0x200d: /* xtrct Rm,Rn */
643 {
644 TCGv high, low;
645 high = tcg_temp_new();
646 tcg_gen_shli_i32(high, REG(B7_4), 16);
647 low = tcg_temp_new();
648 tcg_gen_shri_i32(low, REG(B11_8), 16);
649 tcg_gen_or_i32(REG(B11_8), high, low);
650 tcg_temp_free(low);
651 tcg_temp_free(high);
652 }
653 return;
654 case 0x300c: /* add Rm,Rn */
655 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
656 return;
657 case 0x300e: /* addc Rm,Rn */
658 {
659 TCGv t0, t1, t2;
660 t0 = tcg_temp_new();
661 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
662 t1 = tcg_temp_new();
663 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
664 tcg_gen_add_i32(t0, t0, t1);
665 t2 = tcg_temp_new();
666 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
667 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
668 tcg_gen_or_i32(t1, t1, t2);
669 tcg_temp_free(t2);
670 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
671 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
672 tcg_temp_free(t1);
673 tcg_gen_mov_i32(REG(B11_8), t0);
674 tcg_temp_free(t0);
675 }
676 return;
677 case 0x300f: /* addv Rm,Rn */
678 {
679 TCGv t0, t1, t2;
680 t0 = tcg_temp_new();
681 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
682 t1 = tcg_temp_new();
683 tcg_gen_xor_i32(t1, t0, REG(B11_8));
684 t2 = tcg_temp_new();
685 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
686 tcg_gen_andc_i32(t1, t1, t2);
687 tcg_temp_free(t2);
688 tcg_gen_shri_i32(t1, t1, 31);
689 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
690 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
691 tcg_temp_free(t1);
692 tcg_gen_mov_i32(REG(B7_4), t0);
693 tcg_temp_free(t0);
694 }
695 return;
696 case 0x2009: /* and Rm,Rn */
697 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
698 return;
699 case 0x3000: /* cmp/eq Rm,Rn */
700 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
701 return;
702 case 0x3003: /* cmp/ge Rm,Rn */
703 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
704 return;
705 case 0x3007: /* cmp/gt Rm,Rn */
706 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
707 return;
708 case 0x3006: /* cmp/hi Rm,Rn */
709 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
710 return;
711 case 0x3002: /* cmp/hs Rm,Rn */
712 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
713 return;
714 case 0x200c: /* cmp/str Rm,Rn */
715 {
716 TCGv cmp1 = tcg_temp_new();
717 TCGv cmp2 = tcg_temp_new();
718 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
719 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
720 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
721 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
722 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
723 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
724 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
725 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
726 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
727 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
728 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
729 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
730 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
731 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
732 tcg_temp_free(cmp2);
733 tcg_temp_free(cmp1);
734 }
735 return;
736 case 0x2007: /* div0s Rm,Rn */
737 {
738 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
739 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
740 TCGv val = tcg_temp_new();
741 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
742 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
743 tcg_temp_free(val);
744 }
745 return;
746 case 0x3004: /* div1 Rm,Rn */
747 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
748 return;
749 case 0x300d: /* dmuls.l Rm,Rn */
750 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
751 return;
752 case 0x3005: /* dmulu.l Rm,Rn */
753 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
754 return;
755 case 0x600e: /* exts.b Rm,Rn */
756 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
757 return;
758 case 0x600f: /* exts.w Rm,Rn */
759 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
760 return;
761 case 0x600c: /* extu.b Rm,Rn */
762 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
763 return;
764 case 0x600d: /* extu.w Rm,Rn */
765 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
766 return;
767 case 0x000f: /* mac.l @Rm+,@Rn+ */
768 {
769 TCGv arg0, arg1;
770 arg0 = tcg_temp_new();
771 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
772 arg1 = tcg_temp_new();
773 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
774 gen_helper_macl(cpu_env, arg0, arg1);
775 tcg_temp_free(arg1);
776 tcg_temp_free(arg0);
777 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
778 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
779 }
780 return;
781 case 0x400f: /* mac.w @Rm+,@Rn+ */
782 {
783 TCGv arg0, arg1;
784 arg0 = tcg_temp_new();
785 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
786 arg1 = tcg_temp_new();
787 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
788 gen_helper_macw(cpu_env, arg0, arg1);
789 tcg_temp_free(arg1);
790 tcg_temp_free(arg0);
791 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
792 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
793 }
794 return;
795 case 0x0007: /* mul.l Rm,Rn */
796 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
797 return;
798 case 0x200f: /* muls.w Rm,Rn */
799 {
800 TCGv arg0, arg1;
801 arg0 = tcg_temp_new();
802 tcg_gen_ext16s_i32(arg0, REG(B7_4));
803 arg1 = tcg_temp_new();
804 tcg_gen_ext16s_i32(arg1, REG(B11_8));
805 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
806 tcg_temp_free(arg1);
807 tcg_temp_free(arg0);
808 }
809 return;
810 case 0x200e: /* mulu.w Rm,Rn */
811 {
812 TCGv arg0, arg1;
813 arg0 = tcg_temp_new();
814 tcg_gen_ext16u_i32(arg0, REG(B7_4));
815 arg1 = tcg_temp_new();
816 tcg_gen_ext16u_i32(arg1, REG(B11_8));
817 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
818 tcg_temp_free(arg1);
819 tcg_temp_free(arg0);
820 }
821 return;
822 case 0x600b: /* neg Rm,Rn */
823 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
824 return;
825 case 0x600a: /* negc Rm,Rn */
826 {
827 TCGv t0, t1;
828 t0 = tcg_temp_new();
829 tcg_gen_neg_i32(t0, REG(B7_4));
830 t1 = tcg_temp_new();
831 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
832 tcg_gen_sub_i32(REG(B11_8), t0, t1);
833 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
834 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
835 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
836 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
837 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
838 tcg_temp_free(t0);
839 tcg_temp_free(t1);
840 }
841 return;
842 case 0x6007: /* not Rm,Rn */
843 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
844 return;
845 case 0x200b: /* or Rm,Rn */
846 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
847 return;
848 case 0x400c: /* shad Rm,Rn */
849 {
850 int label1 = gen_new_label();
851 int label2 = gen_new_label();
852 int label3 = gen_new_label();
853 int label4 = gen_new_label();
854 TCGv shift;
855 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
856 /* Rm positive, shift to the left */
857 shift = tcg_temp_new();
858 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
859 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
860 tcg_temp_free(shift);
861 tcg_gen_br(label4);
862 /* Rm negative, shift to the right */
863 gen_set_label(label1);
864 shift = tcg_temp_new();
865 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
866 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
867 tcg_gen_not_i32(shift, REG(B7_4));
868 tcg_gen_andi_i32(shift, shift, 0x1f);
869 tcg_gen_addi_i32(shift, shift, 1);
870 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
871 tcg_temp_free(shift);
872 tcg_gen_br(label4);
873 /* Rm = -32 */
874 gen_set_label(label2);
875 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
876 tcg_gen_movi_i32(REG(B11_8), 0);
877 tcg_gen_br(label4);
878 gen_set_label(label3);
879 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
880 gen_set_label(label4);
881 }
882 return;
883 case 0x400d: /* shld Rm,Rn */
884 {
885 int label1 = gen_new_label();
886 int label2 = gen_new_label();
887 int label3 = gen_new_label();
888 TCGv shift;
889 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
890 /* Rm positive, shift to the left */
891 shift = tcg_temp_new();
892 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
893 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
894 tcg_temp_free(shift);
895 tcg_gen_br(label3);
896 /* Rm negative, shift to the right */
897 gen_set_label(label1);
898 shift = tcg_temp_new();
899 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
900 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
901 tcg_gen_not_i32(shift, REG(B7_4));
902 tcg_gen_andi_i32(shift, shift, 0x1f);
903 tcg_gen_addi_i32(shift, shift, 1);
904 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
905 tcg_temp_free(shift);
906 tcg_gen_br(label3);
907 /* Rm = -32 */
908 gen_set_label(label2);
909 tcg_gen_movi_i32(REG(B11_8), 0);
910 gen_set_label(label3);
911 }
912 return;
913 case 0x3008: /* sub Rm,Rn */
914 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
915 return;
916 case 0x300a: /* subc Rm,Rn */
917 {
918 TCGv t0, t1, t2;
919 t0 = tcg_temp_new();
920 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
921 t1 = tcg_temp_new();
922 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
923 tcg_gen_sub_i32(t0, t1, t0);
924 t2 = tcg_temp_new();
925 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
926 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
927 tcg_gen_or_i32(t1, t1, t2);
928 tcg_temp_free(t2);
929 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
930 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
931 tcg_temp_free(t1);
932 tcg_gen_mov_i32(REG(B11_8), t0);
933 tcg_temp_free(t0);
934 }
935 return;
936 case 0x300b: /* subv Rm,Rn */
937 {
938 TCGv t0, t1, t2;
939 t0 = tcg_temp_new();
940 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
941 t1 = tcg_temp_new();
942 tcg_gen_xor_i32(t1, t0, REG(B7_4));
943 t2 = tcg_temp_new();
944 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
945 tcg_gen_and_i32(t1, t1, t2);
946 tcg_temp_free(t2);
947 tcg_gen_shri_i32(t1, t1, 31);
948 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
949 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
950 tcg_temp_free(t1);
951 tcg_gen_mov_i32(REG(B11_8), t0);
952 tcg_temp_free(t0);
953 }
954 return;
955 case 0x2008: /* tst Rm,Rn */
956 {
957 TCGv val = tcg_temp_new();
958 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
959 gen_cmp_imm(TCG_COND_EQ, val, 0);
960 tcg_temp_free(val);
961 }
962 return;
963 case 0x200a: /* xor Rm,Rn */
964 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
965 return;
966 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
967 CHECK_FPU_ENABLED
968 if (ctx->flags & FPSCR_SZ) {
969 TCGv_i64 fp = tcg_temp_new_i64();
970 gen_load_fpr64(fp, XREG(B7_4));
971 gen_store_fpr64(fp, XREG(B11_8));
972 tcg_temp_free_i64(fp);
973 } else {
974 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
975 }
976 return;
977 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
978 CHECK_FPU_ENABLED
979 if (ctx->flags & FPSCR_SZ) {
980 TCGv addr_hi = tcg_temp_new();
981 int fr = XREG(B7_4);
982 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
983 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
984 ctx->memidx, MO_TEUL);
985 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
986 ctx->memidx, MO_TEUL);
987 tcg_temp_free(addr_hi);
988 } else {
989 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
990 ctx->memidx, MO_TEUL);
991 }
992 return;
993 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
994 CHECK_FPU_ENABLED
995 if (ctx->flags & FPSCR_SZ) {
996 TCGv addr_hi = tcg_temp_new();
997 int fr = XREG(B11_8);
998 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
999 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
1000 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1001 tcg_temp_free(addr_hi);
1002 } else {
1003 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1004 ctx->memidx, MO_TEUL);
1005 }
1006 return;
1007 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1008 CHECK_FPU_ENABLED
1009 if (ctx->flags & FPSCR_SZ) {
1010 TCGv addr_hi = tcg_temp_new();
1011 int fr = XREG(B11_8);
1012 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1013 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
1014 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1015 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1016 tcg_temp_free(addr_hi);
1017 } else {
1018 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1019 ctx->memidx, MO_TEUL);
1020 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1021 }
1022 return;
1023 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1024 CHECK_FPU_ENABLED
1025 if (ctx->flags & FPSCR_SZ) {
1026 TCGv addr = tcg_temp_new_i32();
1027 int fr = XREG(B7_4);
1028 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1029 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1030 tcg_gen_subi_i32(addr, addr, 4);
1031 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1032 tcg_gen_mov_i32(REG(B11_8), addr);
1033 tcg_temp_free(addr);
1034 } else {
1035 TCGv addr;
1036 addr = tcg_temp_new_i32();
1037 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1038 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1039 ctx->memidx, MO_TEUL);
1040 tcg_gen_mov_i32(REG(B11_8), addr);
1041 tcg_temp_free(addr);
1042 }
1043 return;
1044 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1045 CHECK_FPU_ENABLED
1046 {
1047 TCGv addr = tcg_temp_new_i32();
1048 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1049 if (ctx->flags & FPSCR_SZ) {
1050 int fr = XREG(B11_8);
1051 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1052 ctx->memidx, MO_TEUL);
1053 tcg_gen_addi_i32(addr, addr, 4);
1054 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1055 ctx->memidx, MO_TEUL);
1056 } else {
1057 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1058 ctx->memidx, MO_TEUL);
1059 }
1060 tcg_temp_free(addr);
1061 }
1062 return;
1063 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1064 CHECK_FPU_ENABLED
1065 {
1066 TCGv addr = tcg_temp_new();
1067 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1068 if (ctx->flags & FPSCR_SZ) {
1069 int fr = XREG(B7_4);
1070 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1071 ctx->memidx, MO_TEUL);
1072 tcg_gen_addi_i32(addr, addr, 4);
1073 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1074 ctx->memidx, MO_TEUL);
1075 } else {
1076 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1077 ctx->memidx, MO_TEUL);
1078 }
1079 tcg_temp_free(addr);
1080 }
1081 return;
1082 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1083 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1084 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1085 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1086 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1087 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1088 {
1089 CHECK_FPU_ENABLED
1090 if (ctx->flags & FPSCR_PR) {
1091 TCGv_i64 fp0, fp1;
1092
1093 if (ctx->opcode & 0x0110)
1094 break; /* illegal instruction */
1095 fp0 = tcg_temp_new_i64();
1096 fp1 = tcg_temp_new_i64();
1097 gen_load_fpr64(fp0, DREG(B11_8));
1098 gen_load_fpr64(fp1, DREG(B7_4));
1099 switch (ctx->opcode & 0xf00f) {
1100 case 0xf000: /* fadd Rm,Rn */
1101 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1102 break;
1103 case 0xf001: /* fsub Rm,Rn */
1104 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1105 break;
1106 case 0xf002: /* fmul Rm,Rn */
1107 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1108 break;
1109 case 0xf003: /* fdiv Rm,Rn */
1110 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1111 break;
1112 case 0xf004: /* fcmp/eq Rm,Rn */
1113 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1114 return;
1115 case 0xf005: /* fcmp/gt Rm,Rn */
1116 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1117 return;
1118 }
1119 gen_store_fpr64(fp0, DREG(B11_8));
1120 tcg_temp_free_i64(fp0);
1121 tcg_temp_free_i64(fp1);
1122 } else {
1123 switch (ctx->opcode & 0xf00f) {
1124 case 0xf000: /* fadd Rm,Rn */
1125 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1126 cpu_fregs[FREG(B11_8)],
1127 cpu_fregs[FREG(B7_4)]);
1128 break;
1129 case 0xf001: /* fsub Rm,Rn */
1130 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1131 cpu_fregs[FREG(B11_8)],
1132 cpu_fregs[FREG(B7_4)]);
1133 break;
1134 case 0xf002: /* fmul Rm,Rn */
1135 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1136 cpu_fregs[FREG(B11_8)],
1137 cpu_fregs[FREG(B7_4)]);
1138 break;
1139 case 0xf003: /* fdiv Rm,Rn */
1140 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1141 cpu_fregs[FREG(B11_8)],
1142 cpu_fregs[FREG(B7_4)]);
1143 break;
1144 case 0xf004: /* fcmp/eq Rm,Rn */
1145 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1146 cpu_fregs[FREG(B7_4)]);
1147 return;
1148 case 0xf005: /* fcmp/gt Rm,Rn */
1149 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1150 cpu_fregs[FREG(B7_4)]);
1151 return;
1152 }
1153 }
1154 }
1155 return;
1156 case 0xf00e: /* fmac FR0,RM,Rn */
1157 {
1158 CHECK_FPU_ENABLED
1159 if (ctx->flags & FPSCR_PR) {
1160 break; /* illegal instruction */
1161 } else {
1162 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1163 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1164 cpu_fregs[FREG(B11_8)]);
1165 return;
1166 }
1167 }
1168 }
1169
1170 switch (ctx->opcode & 0xff00) {
1171 case 0xc900: /* and #imm,R0 */
1172 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1173 return;
1174 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1175 {
1176 TCGv addr, val;
1177 addr = tcg_temp_new();
1178 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1179 val = tcg_temp_new();
1180 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1181 tcg_gen_andi_i32(val, val, B7_0);
1182 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1183 tcg_temp_free(val);
1184 tcg_temp_free(addr);
1185 }
1186 return;
1187 case 0x8b00: /* bf label */
1188 CHECK_NOT_DELAY_SLOT
1189 gen_conditional_jump(ctx, ctx->pc + 2,
1190 ctx->pc + 4 + B7_0s * 2);
1191 ctx->bstate = BS_BRANCH;
1192 return;
1193 case 0x8f00: /* bf/s label */
1194 CHECK_NOT_DELAY_SLOT
1195 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1196 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1197 return;
1198 case 0x8900: /* bt label */
1199 CHECK_NOT_DELAY_SLOT
1200 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1201 ctx->pc + 2);
1202 ctx->bstate = BS_BRANCH;
1203 return;
1204 case 0x8d00: /* bt/s label */
1205 CHECK_NOT_DELAY_SLOT
1206 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1207 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1208 return;
1209 case 0x8800: /* cmp/eq #imm,R0 */
1210 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1211 return;
1212 case 0xc400: /* mov.b @(disp,GBR),R0 */
1213 {
1214 TCGv addr = tcg_temp_new();
1215 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1216 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1217 tcg_temp_free(addr);
1218 }
1219 return;
1220 case 0xc500: /* mov.w @(disp,GBR),R0 */
1221 {
1222 TCGv addr = tcg_temp_new();
1223 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1224 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1225 tcg_temp_free(addr);
1226 }
1227 return;
1228 case 0xc600: /* mov.l @(disp,GBR),R0 */
1229 {
1230 TCGv addr = tcg_temp_new();
1231 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1232 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1233 tcg_temp_free(addr);
1234 }
1235 return;
1236 case 0xc000: /* mov.b R0,@(disp,GBR) */
1237 {
1238 TCGv addr = tcg_temp_new();
1239 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1240 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1241 tcg_temp_free(addr);
1242 }
1243 return;
1244 case 0xc100: /* mov.w R0,@(disp,GBR) */
1245 {
1246 TCGv addr = tcg_temp_new();
1247 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1248 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1249 tcg_temp_free(addr);
1250 }
1251 return;
1252 case 0xc200: /* mov.l R0,@(disp,GBR) */
1253 {
1254 TCGv addr = tcg_temp_new();
1255 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1256 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1257 tcg_temp_free(addr);
1258 }
1259 return;
1260 case 0x8000: /* mov.b R0,@(disp,Rn) */
1261 {
1262 TCGv addr = tcg_temp_new();
1263 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1264 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1265 tcg_temp_free(addr);
1266 }
1267 return;
1268 case 0x8100: /* mov.w R0,@(disp,Rn) */
1269 {
1270 TCGv addr = tcg_temp_new();
1271 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1272 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1273 tcg_temp_free(addr);
1274 }
1275 return;
1276 case 0x8400: /* mov.b @(disp,Rn),R0 */
1277 {
1278 TCGv addr = tcg_temp_new();
1279 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1280 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1281 tcg_temp_free(addr);
1282 }
1283 return;
1284 case 0x8500: /* mov.w @(disp,Rn),R0 */
1285 {
1286 TCGv addr = tcg_temp_new();
1287 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1288 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1289 tcg_temp_free(addr);
1290 }
1291 return;
1292 case 0xc700: /* mova @(disp,PC),R0 */
1293 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1294 return;
1295 case 0xcb00: /* or #imm,R0 */
1296 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1297 return;
1298 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1299 {
1300 TCGv addr, val;
1301 addr = tcg_temp_new();
1302 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1303 val = tcg_temp_new();
1304 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1305 tcg_gen_ori_i32(val, val, B7_0);
1306 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1307 tcg_temp_free(val);
1308 tcg_temp_free(addr);
1309 }
1310 return;
1311 case 0xc300: /* trapa #imm */
1312 {
1313 TCGv imm;
1314 CHECK_NOT_DELAY_SLOT
1315 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1316 imm = tcg_const_i32(B7_0);
1317 gen_helper_trapa(cpu_env, imm);
1318 tcg_temp_free(imm);
1319 ctx->bstate = BS_BRANCH;
1320 }
1321 return;
1322 case 0xc800: /* tst #imm,R0 */
1323 {
1324 TCGv val = tcg_temp_new();
1325 tcg_gen_andi_i32(val, REG(0), B7_0);
1326 gen_cmp_imm(TCG_COND_EQ, val, 0);
1327 tcg_temp_free(val);
1328 }
1329 return;
1330 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1331 {
1332 TCGv val = tcg_temp_new();
1333 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1334 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1335 tcg_gen_andi_i32(val, val, B7_0);
1336 gen_cmp_imm(TCG_COND_EQ, val, 0);
1337 tcg_temp_free(val);
1338 }
1339 return;
1340 case 0xca00: /* xor #imm,R0 */
1341 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1342 return;
1343 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1344 {
1345 TCGv addr, val;
1346 addr = tcg_temp_new();
1347 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1348 val = tcg_temp_new();
1349 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1350 tcg_gen_xori_i32(val, val, B7_0);
1351 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1352 tcg_temp_free(val);
1353 tcg_temp_free(addr);
1354 }
1355 return;
1356 }
1357
1358 switch (ctx->opcode & 0xf08f) {
1359 case 0x408e: /* ldc Rm,Rn_BANK */
1360 CHECK_PRIVILEGED
1361 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1362 return;
1363 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1364 CHECK_PRIVILEGED
1365 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1366 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1367 return;
1368 case 0x0082: /* stc Rm_BANK,Rn */
1369 CHECK_PRIVILEGED
1370 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1371 return;
1372 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1373 CHECK_PRIVILEGED
1374 {
1375 TCGv addr = tcg_temp_new();
1376 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1377 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1378 tcg_gen_mov_i32(REG(B11_8), addr);
1379 tcg_temp_free(addr);
1380 }
1381 return;
1382 }
1383
1384 switch (ctx->opcode & 0xf0ff) {
1385 case 0x0023: /* braf Rn */
1386 CHECK_NOT_DELAY_SLOT
1387 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1388 ctx->flags |= DELAY_SLOT;
1389 ctx->delayed_pc = (uint32_t) - 1;
1390 return;
1391 case 0x0003: /* bsrf Rn */
1392 CHECK_NOT_DELAY_SLOT
1393 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1394 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1395 ctx->flags |= DELAY_SLOT;
1396 ctx->delayed_pc = (uint32_t) - 1;
1397 return;
1398 case 0x4015: /* cmp/pl Rn */
1399 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1400 return;
1401 case 0x4011: /* cmp/pz Rn */
1402 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1403 return;
1404 case 0x4010: /* dt Rn */
1405 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1406 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1407 return;
1408 case 0x402b: /* jmp @Rn */
1409 CHECK_NOT_DELAY_SLOT
1410 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1411 ctx->flags |= DELAY_SLOT;
1412 ctx->delayed_pc = (uint32_t) - 1;
1413 return;
1414 case 0x400b: /* jsr @Rn */
1415 CHECK_NOT_DELAY_SLOT
1416 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1417 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1418 ctx->flags |= DELAY_SLOT;
1419 ctx->delayed_pc = (uint32_t) - 1;
1420 return;
1421 case 0x400e: /* ldc Rm,SR */
1422 CHECK_PRIVILEGED
1423 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1424 ctx->bstate = BS_STOP;
1425 return;
1426 case 0x4007: /* ldc.l @Rm+,SR */
1427 CHECK_PRIVILEGED
1428 {
1429 TCGv val = tcg_temp_new();
1430 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1431 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1432 tcg_temp_free(val);
1433 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1434 ctx->bstate = BS_STOP;
1435 }
1436 return;
1437 case 0x0002: /* stc SR,Rn */
1438 CHECK_PRIVILEGED
1439 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1440 return;
1441 case 0x4003: /* stc SR,@-Rn */
1442 CHECK_PRIVILEGED
1443 {
1444 TCGv addr = tcg_temp_new();
1445 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1446 tcg_gen_qemu_st_i32(cpu_sr, addr, ctx->memidx, MO_TEUL);
1447 tcg_gen_mov_i32(REG(B11_8), addr);
1448 tcg_temp_free(addr);
1449 }
1450 return;
1451 #define LD(reg,ldnum,ldpnum,prechk) \
1452 case ldnum: \
1453 prechk \
1454 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1455 return; \
1456 case ldpnum: \
1457 prechk \
1458 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1459 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1460 return;
1461 #define ST(reg,stnum,stpnum,prechk) \
1462 case stnum: \
1463 prechk \
1464 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1465 return; \
1466 case stpnum: \
1467 prechk \
1468 { \
1469 TCGv addr = tcg_temp_new(); \
1470 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1471 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1472 tcg_gen_mov_i32(REG(B11_8), addr); \
1473 tcg_temp_free(addr); \
1474 } \
1475 return;
1476 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1477 LD(reg,ldnum,ldpnum,prechk) \
1478 ST(reg,stnum,stpnum,prechk)
1479 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1480 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1481 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1482 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1483 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1484 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1485 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1486 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1487 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1488 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1489 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1490 case 0x406a: /* lds Rm,FPSCR */
1491 CHECK_FPU_ENABLED
1492 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1493 ctx->bstate = BS_STOP;
1494 return;
1495 case 0x4066: /* lds.l @Rm+,FPSCR */
1496 CHECK_FPU_ENABLED
1497 {
1498 TCGv addr = tcg_temp_new();
1499 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1500 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1501 gen_helper_ld_fpscr(cpu_env, addr);
1502 tcg_temp_free(addr);
1503 ctx->bstate = BS_STOP;
1504 }
1505 return;
1506 case 0x006a: /* sts FPSCR,Rn */
1507 CHECK_FPU_ENABLED
1508 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1509 return;
1510 case 0x4062: /* sts FPSCR,@-Rn */
1511 CHECK_FPU_ENABLED
1512 {
1513 TCGv addr, val;
1514 val = tcg_temp_new();
1515 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1516 addr = tcg_temp_new();
1517 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1518 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1519 tcg_gen_mov_i32(REG(B11_8), addr);
1520 tcg_temp_free(addr);
1521 tcg_temp_free(val);
1522 }
1523 return;
1524 case 0x00c3: /* movca.l R0,@Rm */
1525 {
1526 TCGv val = tcg_temp_new();
1527 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1528 gen_helper_movcal(cpu_env, REG(B11_8), val);
1529 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1530 }
1531 ctx->has_movcal = 1;
1532 return;
1533 case 0x40a9:
1534 /* MOVUA.L @Rm,R0 (Rm) -> R0
1535 Load non-boundary-aligned data */
1536 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1537 return;
1538 case 0x40e9:
1539 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1540 Load non-boundary-aligned data */
1541 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1542 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1543 return;
1544 case 0x0029: /* movt Rn */
1545 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1546 return;
1547 case 0x0073:
1548 /* MOVCO.L
1549 LDST -> T
1550 If (T == 1) R0 -> (Rn)
1551 0 -> LDST
1552 */
1553 if (ctx->features & SH_FEATURE_SH4A) {
1554 int label = gen_new_label();
1555 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1556 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1557 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1558 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1559 gen_set_label(label);
1560 tcg_gen_movi_i32(cpu_ldst, 0);
1561 return;
1562 } else
1563 break;
1564 case 0x0063:
1565 /* MOVLI.L @Rm,R0
1566 1 -> LDST
1567 (Rm) -> R0
1568 When interrupt/exception
1569 occurred 0 -> LDST
1570 */
1571 if (ctx->features & SH_FEATURE_SH4A) {
1572 tcg_gen_movi_i32(cpu_ldst, 0);
1573 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1574 tcg_gen_movi_i32(cpu_ldst, 1);
1575 return;
1576 } else
1577 break;
1578 case 0x0093: /* ocbi @Rn */
1579 {
1580 gen_helper_ocbi(cpu_env, REG(B11_8));
1581 }
1582 return;
1583 case 0x00a3: /* ocbp @Rn */
1584 case 0x00b3: /* ocbwb @Rn */
1585 /* These instructions are supposed to do nothing in case of
1586 a cache miss. Given that we only partially emulate caches
1587 it is safe to simply ignore them. */
1588 return;
1589 case 0x0083: /* pref @Rn */
1590 return;
1591 case 0x00d3: /* prefi @Rn */
1592 if (ctx->features & SH_FEATURE_SH4A)
1593 return;
1594 else
1595 break;
1596 case 0x00e3: /* icbi @Rn */
1597 if (ctx->features & SH_FEATURE_SH4A)
1598 return;
1599 else
1600 break;
1601 case 0x00ab: /* synco */
1602 if (ctx->features & SH_FEATURE_SH4A)
1603 return;
1604 else
1605 break;
1606 case 0x4024: /* rotcl Rn */
1607 {
1608 TCGv tmp = tcg_temp_new();
1609 tcg_gen_mov_i32(tmp, cpu_sr);
1610 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1611 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1612 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1613 tcg_temp_free(tmp);
1614 }
1615 return;
1616 case 0x4025: /* rotcr Rn */
1617 {
1618 TCGv tmp = tcg_temp_new();
1619 tcg_gen_mov_i32(tmp, cpu_sr);
1620 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1621 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1622 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1623 tcg_temp_free(tmp);
1624 }
1625 return;
1626 case 0x4004: /* rotl Rn */
1627 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1628 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1629 return;
1630 case 0x4005: /* rotr Rn */
1631 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1632 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1633 return;
1634 case 0x4000: /* shll Rn */
1635 case 0x4020: /* shal Rn */
1636 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1637 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1638 return;
1639 case 0x4021: /* shar Rn */
1640 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1641 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1642 return;
1643 case 0x4001: /* shlr Rn */
1644 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1645 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1646 return;
1647 case 0x4008: /* shll2 Rn */
1648 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1649 return;
1650 case 0x4018: /* shll8 Rn */
1651 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1652 return;
1653 case 0x4028: /* shll16 Rn */
1654 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1655 return;
1656 case 0x4009: /* shlr2 Rn */
1657 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1658 return;
1659 case 0x4019: /* shlr8 Rn */
1660 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1661 return;
1662 case 0x4029: /* shlr16 Rn */
1663 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1664 return;
1665 case 0x401b: /* tas.b @Rn */
1666 {
1667 TCGv addr, val;
1668 addr = tcg_temp_local_new();
1669 tcg_gen_mov_i32(addr, REG(B11_8));
1670 val = tcg_temp_local_new();
1671 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1672 gen_cmp_imm(TCG_COND_EQ, val, 0);
1673 tcg_gen_ori_i32(val, val, 0x80);
1674 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1675 tcg_temp_free(val);
1676 tcg_temp_free(addr);
1677 }
1678 return;
1679 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1680 CHECK_FPU_ENABLED
1681 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1682 return;
1683 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1684 CHECK_FPU_ENABLED
1685 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1686 return;
1687 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1688 CHECK_FPU_ENABLED
1689 if (ctx->flags & FPSCR_PR) {
1690 TCGv_i64 fp;
1691 if (ctx->opcode & 0x0100)
1692 break; /* illegal instruction */
1693 fp = tcg_temp_new_i64();
1694 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1695 gen_store_fpr64(fp, DREG(B11_8));
1696 tcg_temp_free_i64(fp);
1697 }
1698 else {
1699 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1700 }
1701 return;
1702 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1703 CHECK_FPU_ENABLED
1704 if (ctx->flags & FPSCR_PR) {
1705 TCGv_i64 fp;
1706 if (ctx->opcode & 0x0100)
1707 break; /* illegal instruction */
1708 fp = tcg_temp_new_i64();
1709 gen_load_fpr64(fp, DREG(B11_8));
1710 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1711 tcg_temp_free_i64(fp);
1712 }
1713 else {
1714 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1715 }
1716 return;
1717 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1718 CHECK_FPU_ENABLED
1719 {
1720 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1721 }
1722 return;
1723 case 0xf05d: /* fabs FRn/DRn */
1724 CHECK_FPU_ENABLED
1725 if (ctx->flags & FPSCR_PR) {
1726 if (ctx->opcode & 0x0100)
1727 break; /* illegal instruction */
1728 TCGv_i64 fp = tcg_temp_new_i64();
1729 gen_load_fpr64(fp, DREG(B11_8));
1730 gen_helper_fabs_DT(fp, fp);
1731 gen_store_fpr64(fp, DREG(B11_8));
1732 tcg_temp_free_i64(fp);
1733 } else {
1734 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1735 }
1736 return;
1737 case 0xf06d: /* fsqrt FRn */
1738 CHECK_FPU_ENABLED
1739 if (ctx->flags & FPSCR_PR) {
1740 if (ctx->opcode & 0x0100)
1741 break; /* illegal instruction */
1742 TCGv_i64 fp = tcg_temp_new_i64();
1743 gen_load_fpr64(fp, DREG(B11_8));
1744 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1745 gen_store_fpr64(fp, DREG(B11_8));
1746 tcg_temp_free_i64(fp);
1747 } else {
1748 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1749 cpu_fregs[FREG(B11_8)]);
1750 }
1751 return;
1752 case 0xf07d: /* fsrra FRn */
1753 CHECK_FPU_ENABLED
1754 break;
1755 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1756 CHECK_FPU_ENABLED
1757 if (!(ctx->flags & FPSCR_PR)) {
1758 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1759 }
1760 return;
1761 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1762 CHECK_FPU_ENABLED
1763 if (!(ctx->flags & FPSCR_PR)) {
1764 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1765 }
1766 return;
1767 case 0xf0ad: /* fcnvsd FPUL,DRn */
1768 CHECK_FPU_ENABLED
1769 {
1770 TCGv_i64 fp = tcg_temp_new_i64();
1771 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1772 gen_store_fpr64(fp, DREG(B11_8));
1773 tcg_temp_free_i64(fp);
1774 }
1775 return;
1776 case 0xf0bd: /* fcnvds DRn,FPUL */
1777 CHECK_FPU_ENABLED
1778 {
1779 TCGv_i64 fp = tcg_temp_new_i64();
1780 gen_load_fpr64(fp, DREG(B11_8));
1781 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1782 tcg_temp_free_i64(fp);
1783 }
1784 return;
1785 case 0xf0ed: /* fipr FVm,FVn */
1786 CHECK_FPU_ENABLED
1787 if ((ctx->flags & FPSCR_PR) == 0) {
1788 TCGv m, n;
1789 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1790 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1791 gen_helper_fipr(cpu_env, m, n);
1792 tcg_temp_free(m);
1793 tcg_temp_free(n);
1794 return;
1795 }
1796 break;
1797 case 0xf0fd: /* ftrv XMTRX,FVn */
1798 CHECK_FPU_ENABLED
1799 if ((ctx->opcode & 0x0300) == 0x0100 &&
1800 (ctx->flags & FPSCR_PR) == 0) {
1801 TCGv n;
1802 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1803 gen_helper_ftrv(cpu_env, n);
1804 tcg_temp_free(n);
1805 return;
1806 }
1807 break;
1808 }
1809 #if 0
1810 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1811 ctx->opcode, ctx->pc);
1812 fflush(stderr);
1813 #endif
1814 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1815 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1816 gen_helper_raise_slot_illegal_instruction(cpu_env);
1817 } else {
1818 gen_helper_raise_illegal_instruction(cpu_env);
1819 }
1820 ctx->bstate = BS_BRANCH;
1821 }
1822
/* Translate a single guest instruction, wrapping _decode_opc() with the
   SH4 branch-delay-slot bookkeeping.  The flags snapshot taken on entry
   decides whether THIS insn is executing inside a delay slot; the flags
   left behind by _decode_opc() decide whether the NEXT insn will be. */
static void decode_opc(DisasContext * ctx)
{
    /* Snapshot before translating: if DELAY_SLOT or DELAY_SLOT_CONDITIONAL
       is already set here, the insn we are about to translate sits in the
       delay slot of a previously translated branch. */
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* We just translated the delay-slot insn; now emit the branch
           that owns the slot, and sync the env flags first. */
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }

    }

    /* go into a delay slot: _decode_opc() set a delay flag, so persist it
       to env in case an exception is taken inside the slot. */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
1856
1857 static inline void
1858 gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
1859 bool search_pc)
1860 {
1861 CPUState *cs = CPU(cpu);
1862 CPUSH4State *env = &cpu->env;
1863 DisasContext ctx;
1864 target_ulong pc_start;
1865 static uint16_t *gen_opc_end;
1866 CPUBreakpoint *bp;
1867 int i, ii;
1868 int num_insns;
1869 int max_insns;
1870
1871 pc_start = tb->pc;
1872 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1873 ctx.pc = pc_start;
1874 ctx.flags = (uint32_t)tb->flags;
1875 ctx.bstate = BS_NONE;
1876 ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
1877 /* We don't know if the delayed pc came from a dynamic or static branch,
1878 so assume it is a dynamic branch. */
1879 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1880 ctx.tb = tb;
1881 ctx.singlestep_enabled = cs->singlestep_enabled;
1882 ctx.features = env->features;
1883 ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
1884
1885 ii = -1;
1886 num_insns = 0;
1887 max_insns = tb->cflags & CF_COUNT_MASK;
1888 if (max_insns == 0)
1889 max_insns = CF_COUNT_MASK;
1890 gen_tb_start();
1891 while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
1892 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1893 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
1894 if (ctx.pc == bp->pc) {
1895 /* We have hit a breakpoint - make sure PC is up-to-date */
1896 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1897 gen_helper_debug(cpu_env);
1898 ctx.bstate = BS_BRANCH;
1899 break;
1900 }
1901 }
1902 }
1903 if (search_pc) {
1904 i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1905 if (ii < i) {
1906 ii++;
1907 while (ii < i)
1908 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1909 }
1910 tcg_ctx.gen_opc_pc[ii] = ctx.pc;
1911 gen_opc_hflags[ii] = ctx.flags;
1912 tcg_ctx.gen_opc_instr_start[ii] = 1;
1913 tcg_ctx.gen_opc_icount[ii] = num_insns;
1914 }
1915 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1916 gen_io_start();
1917 #if 0
1918 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1919 fflush(stderr);
1920 #endif
1921 ctx.opcode = cpu_lduw_code(env, ctx.pc);
1922 decode_opc(&ctx);
1923 num_insns++;
1924 ctx.pc += 2;
1925 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1926 break;
1927 if (cs->singlestep_enabled) {
1928 break;
1929 }
1930 if (num_insns >= max_insns)
1931 break;
1932 if (singlestep)
1933 break;
1934 }
1935 if (tb->cflags & CF_LAST_IO)
1936 gen_io_end();
1937 if (cs->singlestep_enabled) {
1938 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1939 gen_helper_debug(cpu_env);
1940 } else {
1941 switch (ctx.bstate) {
1942 case BS_STOP:
1943 /* gen_op_interrupt_restart(); */
1944 /* fall through */
1945 case BS_NONE:
1946 if (ctx.flags) {
1947 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1948 }
1949 gen_goto_tb(&ctx, 0, ctx.pc);
1950 break;
1951 case BS_EXCP:
1952 /* gen_op_interrupt_restart(); */
1953 tcg_gen_exit_tb(0);
1954 break;
1955 case BS_BRANCH:
1956 default:
1957 break;
1958 }
1959 }
1960
1961 gen_tb_end(tb, num_insns);
1962 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
1963 if (search_pc) {
1964 i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1965 ii++;
1966 while (ii <= i)
1967 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1968 } else {
1969 tb->size = ctx.pc - pc_start;
1970 tb->icount = num_insns;
1971 }
1972
1973 #ifdef DEBUG_DISAS
1974 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1975 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1976 log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
1977 qemu_log("\n");
1978 }
1979 #endif
1980 }
1981
1982 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
1983 {
1984 gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
1985 }
1986
1987 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
1988 {
1989 gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
1990 }
1991
1992 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
1993 {
1994 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
1995 env->flags = gen_opc_hflags[pc_pos];
1996 }