/*
 * vmsvga: don't process more than 1024 fifo commands at once
 * [qemu.git] / target-sh4 / translate.c
 */
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21
22 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg-op.h"
27 #include "exec/cpu_ldst.h"
28
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34
35
/* Per-translation-block state threaded through the SH4 decoder. */
typedef struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    target_ulong pc;               /* guest PC of the instruction being decoded */
    uint16_t opcode;               /* raw 16-bit opcode (see B*_ extraction macros) */
    uint32_t flags;                /* SR/FPSCR/delay-slot bits tested during decode */
    int bstate;                    /* BS_* value: why/whether translation stops */
    int memidx;                    /* MMU index for generated loads/stores */
    uint32_t delayed_pc;           /* static branch target, or (uint32_t)-1 if dynamic */
    int singlestep_enabled;        /* debugger single-step requested for this TB */
    uint32_t features;             /* CPU feature bits; set by caller — not used in this chunk */
    int has_movcal;                /* movca.l backup may be live (see _decode_opc comment) */
} DisasContext;
48
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation always executes unprivileged. */
#define IS_USER(ctx) 1
#else
/* Privilege level is the MD bit of SR, cached in ctx->flags. */
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
#endif
54
/* Values for DisasContext::bstate describing how a TB ends. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition
                  */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
63
/* global register indexes */
static TCGv_env cpu_env;
/* 16 architectural regs plus the shadow bank for R0..R7 (see REG/ALTREG) */
static TCGv cpu_gregs[24];
/* SR is stored split: the bulk in cpu_sr plus M/Q/T as single-bit values
 * (see gen_read_sr/gen_write_sr) */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
/* FP registers, both banks (see FREG/XREG) */
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;
75
76 #include "exec/gen-icount.h"
77
78 void sh4_translate_init(void)
79 {
80 int i;
81 static int done_init = 0;
82 static const char * const gregnames[24] = {
83 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
84 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
85 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
86 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
87 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 };
89 static const char * const fregnames[32] = {
90 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
91 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
92 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
93 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
94 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
95 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
96 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
97 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
98 };
99
100 if (done_init)
101 return;
102
103 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104
105 for (i = 0; i < 24; i++)
106 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
107 offsetof(CPUSH4State, gregs[i]),
108 gregnames[i]);
109
110 cpu_pc = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, pc), "PC");
112 cpu_sr = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr), "SR");
114 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
115 offsetof(CPUSH4State, sr_m), "SR_M");
116 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
117 offsetof(CPUSH4State, sr_q), "SR_Q");
118 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
119 offsetof(CPUSH4State, sr_t), "SR_T");
120 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
121 offsetof(CPUSH4State, ssr), "SSR");
122 cpu_spc = tcg_global_mem_new_i32(cpu_env,
123 offsetof(CPUSH4State, spc), "SPC");
124 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
125 offsetof(CPUSH4State, gbr), "GBR");
126 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
127 offsetof(CPUSH4State, vbr), "VBR");
128 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
129 offsetof(CPUSH4State, sgr), "SGR");
130 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
131 offsetof(CPUSH4State, dbr), "DBR");
132 cpu_mach = tcg_global_mem_new_i32(cpu_env,
133 offsetof(CPUSH4State, mach), "MACH");
134 cpu_macl = tcg_global_mem_new_i32(cpu_env,
135 offsetof(CPUSH4State, macl), "MACL");
136 cpu_pr = tcg_global_mem_new_i32(cpu_env,
137 offsetof(CPUSH4State, pr), "PR");
138 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
139 offsetof(CPUSH4State, fpscr), "FPSCR");
140 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
141 offsetof(CPUSH4State, fpul), "FPUL");
142
143 cpu_flags = tcg_global_mem_new_i32(cpu_env,
144 offsetof(CPUSH4State, flags), "_flags_");
145 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
146 offsetof(CPUSH4State, delayed_pc),
147 "_delayed_pc_");
148 cpu_ldst = tcg_global_mem_new_i32(cpu_env,
149 offsetof(CPUSH4State, ldst), "_ldst_");
150
151 for (i = 0; i < 32; i++)
152 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
153 offsetof(CPUSH4State, fregs[i]),
154 fregnames[i]);
155
156 done_init = 1;
157 }
158
159 void superh_cpu_dump_state(CPUState *cs, FILE *f,
160 fprintf_function cpu_fprintf, int flags)
161 {
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
164 int i;
165 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
167 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env->spc, env->ssr, env->gbr, env->vbr);
169 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env->sgr, env->dbr, env->delayed_pc, env->fpul);
171 for (i = 0; i < 24; i += 4) {
172 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i, env->gregs[i], i + 1, env->gregs[i + 1],
174 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
175 }
176 if (env->flags & DELAY_SLOT) {
177 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
178 env->delayed_pc);
179 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
180 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 }
183 }
184
185 static void gen_read_sr(TCGv dst)
186 {
187 TCGv t0 = tcg_temp_new();
188 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
189 tcg_gen_or_i32(dst, dst, t0);
190 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
191 tcg_gen_or_i32(dst, dst, t0);
192 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
193 tcg_gen_or_i32(dst, cpu_sr, t0);
194 tcg_temp_free_i32(t0);
195 }
196
/*
 * Scatter a full SR value from @src: everything except Q, M and T goes to
 * cpu_sr; the Q, M and T bits are extracted into their dedicated
 * single-bit globals.  cpu_sr is written before the bit extractions, so
 * @src must not alias cpu_sr (callers pass cpu_ssr or a temp).
 */
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
    tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
    tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
    tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
    tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
    tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
208
209 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
210 {
211 if (unlikely(ctx->singlestep_enabled)) {
212 return false;
213 }
214
215 #ifndef CONFIG_USER_ONLY
216 return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
217 #else
218 return true;
219 #endif
220 }
221
/*
 * Emit a jump to @dest.  When chaining is allowed (see use_goto_tb), emit
 * a goto_tb marker indexed by @n so the TB can later be patched to link
 * directly; otherwise set PC explicitly and exit to the main loop,
 * raising a debug exception first when single-stepping.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        /* Low bits of the returned pointer encode the chain slot @n. */
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    }
}
236
237 static void gen_jump(DisasContext * ctx)
238 {
239 if (ctx->delayed_pc == (uint32_t) - 1) {
240 /* Target is not statically known, it comes necessarily from a
241 delayed jump as immediate jump are conditinal jumps */
242 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
243 if (ctx->singlestep_enabled)
244 gen_helper_debug(cpu_env);
245 tcg_gen_exit_tb(0);
246 } else {
247 gen_goto_tb(ctx, 0, ctx->delayed_pc);
248 }
249 }
250
/*
 * Record the outcome of a delayed conditional branch.  Always stores
 * @delayed_pc into cpu_delayed_pc, then sets DELAY_SLOT_TRUE in cpu_flags
 * only when T matches the branch sense @t (t != 0: taken when T set;
 * t == 0: taken when T clear).
 */
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGLabel *label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    /* Jump past the flag update when the branch will not be taken. */
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
259
/* Immediate conditional jump (bt or bf): go to @ifnott when the T bit is
 * clear, to @ift when it is set.  Both exits use distinct goto_tb slots
 * (0 and 1) so either direction can be chained. */
static void gen_conditional_jump(DisasContext * ctx,
                                 target_ulong ift, target_ulong ifnott)
{
    TCGLabel *l1 = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);
}
270
271 /* Delayed conditional jump (bt or bf) */
272 static void gen_delayed_conditional_jump(DisasContext * ctx)
273 {
274 TCGLabel *l1;
275 TCGv ds;
276
277 l1 = gen_new_label();
278 ds = tcg_temp_new();
279 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
280 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
281 gen_goto_tb(ctx, 1, ctx->pc + 2);
282 gen_set_label(l1);
283 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
284 gen_jump(ctx);
285 }
286
/* Flush translation-time flag bits to the cpu_flags global, preserving
 * only the runtime-managed DELAY_SLOT_TRUE bit. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
292
/* Read the 64-bit FP register pair at (bank-adjusted) index @reg into @t:
 * fregs[reg] forms the high 32 bits, fregs[reg + 1] the low 32 bits. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
297
/* Write 64-bit value @t into the FP register pair at index @reg:
 * high word to fregs[reg], low word to fregs[reg + 1].
 * NOTE: clobbers @t (it is shifted right by 32 in place). */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
308
/* Opcode field extraction helpers; bit ranges of ctx->opcode. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* 8-bit immediate, sign-extended. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register @x in the currently active bank: R0..R7 come from the
 * shadow bank (gregs[16..23]) when in privileged mode with SR.RB set,
 * else from bank 0. */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank selection from REG(x): yields the inactive bank's
 * copy of R0..R7. */
#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register index adjusted for the FPSCR.FR bank swap. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Remap a pair index into the extended (XD) register layout. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
331
/* Abort decoding with a slot-illegal-instruction exception if the current
 * instruction is not permitted inside a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Abort decoding with an illegal-instruction (or slot-illegal, inside a
 * delay slot) exception when executing unprivileged. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(cpu_env);    \
      } else {                                                  \
         gen_helper_raise_illegal_instruction(cpu_env);         \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Abort decoding with an FPU-disable exception (slot variant inside a
 * delay slot) when SR.FD is set. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & (1u << SR_FD)) {                             \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
364
365 static void _decode_opc(DisasContext * ctx)
366 {
367 /* This code tries to make movcal emulation sufficiently
368 accurate for Linux purposes. This instruction writes
369 memory, and prior to that, always allocates a cache line.
370 It is used in two contexts:
371 - in memcpy, where data is copied in blocks, the first write
372 of to a block uses movca.l for performance.
373 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
374 to flush the cache. Here, the data written by movcal.l is never
375 written to memory, and the data written is just bogus.
376
377 To simulate this, we simulate movcal.l, we store the value to memory,
378 but we also remember the previous content. If we see ocbi, we check
379 if movcal.l for that address was done previously. If so, the write should
380 not have hit the memory, so we restore the previous content.
381 When we see an instruction that is neither movca.l
382 nor ocbi, the previous content is discarded.
383
384 To optimize, we only try to flush stores when we're at the start of
385 TB, or if we already saw movca.l in this TB and did not flush stores
386 yet. */
387 if (ctx->has_movcal)
388 {
389 int opcode = ctx->opcode & 0xf0ff;
390 if (opcode != 0x0093 /* ocbi */
391 && opcode != 0x00c3 /* movca.l */)
392 {
393 gen_helper_discard_movcal_backup(cpu_env);
394 ctx->has_movcal = 0;
395 }
396 }
397
398 #if 0
399 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
400 #endif
401
402 switch (ctx->opcode) {
403 case 0x0019: /* div0u */
404 tcg_gen_movi_i32(cpu_sr_m, 0);
405 tcg_gen_movi_i32(cpu_sr_q, 0);
406 tcg_gen_movi_i32(cpu_sr_t, 0);
407 return;
408 case 0x000b: /* rts */
409 CHECK_NOT_DELAY_SLOT
410 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
411 ctx->flags |= DELAY_SLOT;
412 ctx->delayed_pc = (uint32_t) - 1;
413 return;
414 case 0x0028: /* clrmac */
415 tcg_gen_movi_i32(cpu_mach, 0);
416 tcg_gen_movi_i32(cpu_macl, 0);
417 return;
418 case 0x0048: /* clrs */
419 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
420 return;
421 case 0x0008: /* clrt */
422 tcg_gen_movi_i32(cpu_sr_t, 0);
423 return;
424 case 0x0038: /* ldtlb */
425 CHECK_PRIVILEGED
426 gen_helper_ldtlb(cpu_env);
427 return;
428 case 0x002b: /* rte */
429 CHECK_PRIVILEGED
430 CHECK_NOT_DELAY_SLOT
431 gen_write_sr(cpu_ssr);
432 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
433 ctx->flags |= DELAY_SLOT;
434 ctx->delayed_pc = (uint32_t) - 1;
435 return;
436 case 0x0058: /* sets */
437 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
438 return;
439 case 0x0018: /* sett */
440 tcg_gen_movi_i32(cpu_sr_t, 1);
441 return;
442 case 0xfbfd: /* frchg */
443 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
444 ctx->bstate = BS_STOP;
445 return;
446 case 0xf3fd: /* fschg */
447 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
448 ctx->bstate = BS_STOP;
449 return;
450 case 0x0009: /* nop */
451 return;
452 case 0x001b: /* sleep */
453 CHECK_PRIVILEGED
454 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
455 gen_helper_sleep(cpu_env);
456 return;
457 }
458
459 switch (ctx->opcode & 0xf000) {
460 case 0x1000: /* mov.l Rm,@(disp,Rn) */
461 {
462 TCGv addr = tcg_temp_new();
463 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
464 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
465 tcg_temp_free(addr);
466 }
467 return;
468 case 0x5000: /* mov.l @(disp,Rm),Rn */
469 {
470 TCGv addr = tcg_temp_new();
471 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
472 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
473 tcg_temp_free(addr);
474 }
475 return;
476 case 0xe000: /* mov #imm,Rn */
477 tcg_gen_movi_i32(REG(B11_8), B7_0s);
478 return;
479 case 0x9000: /* mov.w @(disp,PC),Rn */
480 {
481 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
482 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
483 tcg_temp_free(addr);
484 }
485 return;
486 case 0xd000: /* mov.l @(disp,PC),Rn */
487 {
488 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
489 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
490 tcg_temp_free(addr);
491 }
492 return;
493 case 0x7000: /* add #imm,Rn */
494 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
495 return;
496 case 0xa000: /* bra disp */
497 CHECK_NOT_DELAY_SLOT
498 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
499 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
500 ctx->flags |= DELAY_SLOT;
501 return;
502 case 0xb000: /* bsr disp */
503 CHECK_NOT_DELAY_SLOT
504 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
505 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
506 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
507 ctx->flags |= DELAY_SLOT;
508 return;
509 }
510
511 switch (ctx->opcode & 0xf00f) {
512 case 0x6003: /* mov Rm,Rn */
513 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
514 return;
515 case 0x2000: /* mov.b Rm,@Rn */
516 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
517 return;
518 case 0x2001: /* mov.w Rm,@Rn */
519 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
520 return;
521 case 0x2002: /* mov.l Rm,@Rn */
522 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
523 return;
524 case 0x6000: /* mov.b @Rm,Rn */
525 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
526 return;
527 case 0x6001: /* mov.w @Rm,Rn */
528 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
529 return;
530 case 0x6002: /* mov.l @Rm,Rn */
531 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
532 return;
533 case 0x2004: /* mov.b Rm,@-Rn */
534 {
535 TCGv addr = tcg_temp_new();
536 tcg_gen_subi_i32(addr, REG(B11_8), 1);
537 /* might cause re-execution */
538 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
539 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
540 tcg_temp_free(addr);
541 }
542 return;
543 case 0x2005: /* mov.w Rm,@-Rn */
544 {
545 TCGv addr = tcg_temp_new();
546 tcg_gen_subi_i32(addr, REG(B11_8), 2);
547 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
548 tcg_gen_mov_i32(REG(B11_8), addr);
549 tcg_temp_free(addr);
550 }
551 return;
552 case 0x2006: /* mov.l Rm,@-Rn */
553 {
554 TCGv addr = tcg_temp_new();
555 tcg_gen_subi_i32(addr, REG(B11_8), 4);
556 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
557 tcg_gen_mov_i32(REG(B11_8), addr);
558 }
559 return;
560 case 0x6004: /* mov.b @Rm+,Rn */
561 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
562 if ( B11_8 != B7_4 )
563 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
564 return;
565 case 0x6005: /* mov.w @Rm+,Rn */
566 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
567 if ( B11_8 != B7_4 )
568 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
569 return;
570 case 0x6006: /* mov.l @Rm+,Rn */
571 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
572 if ( B11_8 != B7_4 )
573 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
574 return;
575 case 0x0004: /* mov.b Rm,@(R0,Rn) */
576 {
577 TCGv addr = tcg_temp_new();
578 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
579 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
580 tcg_temp_free(addr);
581 }
582 return;
583 case 0x0005: /* mov.w Rm,@(R0,Rn) */
584 {
585 TCGv addr = tcg_temp_new();
586 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
587 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
588 tcg_temp_free(addr);
589 }
590 return;
591 case 0x0006: /* mov.l Rm,@(R0,Rn) */
592 {
593 TCGv addr = tcg_temp_new();
594 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
595 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
596 tcg_temp_free(addr);
597 }
598 return;
599 case 0x000c: /* mov.b @(R0,Rm),Rn */
600 {
601 TCGv addr = tcg_temp_new();
602 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
603 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
604 tcg_temp_free(addr);
605 }
606 return;
607 case 0x000d: /* mov.w @(R0,Rm),Rn */
608 {
609 TCGv addr = tcg_temp_new();
610 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
611 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
612 tcg_temp_free(addr);
613 }
614 return;
615 case 0x000e: /* mov.l @(R0,Rm),Rn */
616 {
617 TCGv addr = tcg_temp_new();
618 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
619 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
620 tcg_temp_free(addr);
621 }
622 return;
623 case 0x6008: /* swap.b Rm,Rn */
624 {
625 TCGv low = tcg_temp_new();;
626 tcg_gen_ext16u_i32(low, REG(B7_4));
627 tcg_gen_bswap16_i32(low, low);
628 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
629 tcg_temp_free(low);
630 }
631 return;
632 case 0x6009: /* swap.w Rm,Rn */
633 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
634 return;
635 case 0x200d: /* xtrct Rm,Rn */
636 {
637 TCGv high, low;
638 high = tcg_temp_new();
639 tcg_gen_shli_i32(high, REG(B7_4), 16);
640 low = tcg_temp_new();
641 tcg_gen_shri_i32(low, REG(B11_8), 16);
642 tcg_gen_or_i32(REG(B11_8), high, low);
643 tcg_temp_free(low);
644 tcg_temp_free(high);
645 }
646 return;
647 case 0x300c: /* add Rm,Rn */
648 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
649 return;
650 case 0x300e: /* addc Rm,Rn */
651 {
652 TCGv t0, t1;
653 t0 = tcg_const_tl(0);
654 t1 = tcg_temp_new();
655 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
656 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
657 REG(B11_8), t0, t1, cpu_sr_t);
658 tcg_temp_free(t0);
659 tcg_temp_free(t1);
660 }
661 return;
662 case 0x300f: /* addv Rm,Rn */
663 {
664 TCGv t0, t1, t2;
665 t0 = tcg_temp_new();
666 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
667 t1 = tcg_temp_new();
668 tcg_gen_xor_i32(t1, t0, REG(B11_8));
669 t2 = tcg_temp_new();
670 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
671 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
672 tcg_temp_free(t2);
673 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
674 tcg_temp_free(t1);
675 tcg_gen_mov_i32(REG(B7_4), t0);
676 tcg_temp_free(t0);
677 }
678 return;
679 case 0x2009: /* and Rm,Rn */
680 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
681 return;
682 case 0x3000: /* cmp/eq Rm,Rn */
683 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
684 return;
685 case 0x3003: /* cmp/ge Rm,Rn */
686 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
687 return;
688 case 0x3007: /* cmp/gt Rm,Rn */
689 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
690 return;
691 case 0x3006: /* cmp/hi Rm,Rn */
692 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
693 return;
694 case 0x3002: /* cmp/hs Rm,Rn */
695 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
696 return;
697 case 0x200c: /* cmp/str Rm,Rn */
698 {
699 TCGv cmp1 = tcg_temp_new();
700 TCGv cmp2 = tcg_temp_new();
701 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
702 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
703 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
704 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
705 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
706 tcg_temp_free(cmp2);
707 tcg_temp_free(cmp1);
708 }
709 return;
710 case 0x2007: /* div0s Rm,Rn */
711 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
712 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
713 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
714 return;
715 case 0x3004: /* div1 Rm,Rn */
716 {
717 TCGv t0 = tcg_temp_new();
718 TCGv t1 = tcg_temp_new();
719 TCGv t2 = tcg_temp_new();
720 TCGv zero = tcg_const_i32(0);
721
722 /* shift left arg1, saving the bit being pushed out and inserting
723 T on the right */
724 tcg_gen_shri_i32(t0, REG(B11_8), 31);
725 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
726 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
727
728 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
729 using 64-bit temps, we compute arg0's high part from q ^ m, so
730 that it is 0x00000000 when adding the value or 0xffffffff when
731 subtracting it. */
732 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
733 tcg_gen_subi_i32(t1, t1, 1);
734 tcg_gen_neg_i32(t2, REG(B7_4));
735 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
736 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
737
738 /* compute T and Q depending on carry */
739 tcg_gen_andi_i32(t1, t1, 1);
740 tcg_gen_xor_i32(t1, t1, t0);
741 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
742 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
743
744 tcg_temp_free(zero);
745 tcg_temp_free(t2);
746 tcg_temp_free(t1);
747 tcg_temp_free(t0);
748 }
749 return;
750 case 0x300d: /* dmuls.l Rm,Rn */
751 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
752 return;
753 case 0x3005: /* dmulu.l Rm,Rn */
754 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
755 return;
756 case 0x600e: /* exts.b Rm,Rn */
757 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
758 return;
759 case 0x600f: /* exts.w Rm,Rn */
760 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
761 return;
762 case 0x600c: /* extu.b Rm,Rn */
763 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
764 return;
765 case 0x600d: /* extu.w Rm,Rn */
766 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
767 return;
768 case 0x000f: /* mac.l @Rm+,@Rn+ */
769 {
770 TCGv arg0, arg1;
771 arg0 = tcg_temp_new();
772 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
773 arg1 = tcg_temp_new();
774 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
775 gen_helper_macl(cpu_env, arg0, arg1);
776 tcg_temp_free(arg1);
777 tcg_temp_free(arg0);
778 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
779 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
780 }
781 return;
782 case 0x400f: /* mac.w @Rm+,@Rn+ */
783 {
784 TCGv arg0, arg1;
785 arg0 = tcg_temp_new();
786 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
787 arg1 = tcg_temp_new();
788 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
789 gen_helper_macw(cpu_env, arg0, arg1);
790 tcg_temp_free(arg1);
791 tcg_temp_free(arg0);
792 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
793 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
794 }
795 return;
796 case 0x0007: /* mul.l Rm,Rn */
797 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
798 return;
799 case 0x200f: /* muls.w Rm,Rn */
800 {
801 TCGv arg0, arg1;
802 arg0 = tcg_temp_new();
803 tcg_gen_ext16s_i32(arg0, REG(B7_4));
804 arg1 = tcg_temp_new();
805 tcg_gen_ext16s_i32(arg1, REG(B11_8));
806 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
807 tcg_temp_free(arg1);
808 tcg_temp_free(arg0);
809 }
810 return;
811 case 0x200e: /* mulu.w Rm,Rn */
812 {
813 TCGv arg0, arg1;
814 arg0 = tcg_temp_new();
815 tcg_gen_ext16u_i32(arg0, REG(B7_4));
816 arg1 = tcg_temp_new();
817 tcg_gen_ext16u_i32(arg1, REG(B11_8));
818 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
819 tcg_temp_free(arg1);
820 tcg_temp_free(arg0);
821 }
822 return;
823 case 0x600b: /* neg Rm,Rn */
824 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
825 return;
826 case 0x600a: /* negc Rm,Rn */
827 {
828 TCGv t0 = tcg_const_i32(0);
829 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
830 REG(B7_4), t0, cpu_sr_t, t0);
831 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
832 t0, t0, REG(B11_8), cpu_sr_t);
833 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
834 tcg_temp_free(t0);
835 }
836 return;
837 case 0x6007: /* not Rm,Rn */
838 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
839 return;
840 case 0x200b: /* or Rm,Rn */
841 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
842 return;
843 case 0x400c: /* shad Rm,Rn */
844 {
845 TCGv t0 = tcg_temp_new();
846 TCGv t1 = tcg_temp_new();
847 TCGv t2 = tcg_temp_new();
848
849 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
850
851 /* positive case: shift to the left */
852 tcg_gen_shl_i32(t1, REG(B11_8), t0);
853
854 /* negative case: shift to the right in two steps to
855 correctly handle the -32 case */
856 tcg_gen_xori_i32(t0, t0, 0x1f);
857 tcg_gen_sar_i32(t2, REG(B11_8), t0);
858 tcg_gen_sari_i32(t2, t2, 1);
859
860 /* select between the two cases */
861 tcg_gen_movi_i32(t0, 0);
862 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
863
864 tcg_temp_free(t0);
865 tcg_temp_free(t1);
866 tcg_temp_free(t2);
867 }
868 return;
869 case 0x400d: /* shld Rm,Rn */
870 {
871 TCGv t0 = tcg_temp_new();
872 TCGv t1 = tcg_temp_new();
873 TCGv t2 = tcg_temp_new();
874
875 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
876
877 /* positive case: shift to the left */
878 tcg_gen_shl_i32(t1, REG(B11_8), t0);
879
880 /* negative case: shift to the right in two steps to
881 correctly handle the -32 case */
882 tcg_gen_xori_i32(t0, t0, 0x1f);
883 tcg_gen_shr_i32(t2, REG(B11_8), t0);
884 tcg_gen_shri_i32(t2, t2, 1);
885
886 /* select between the two cases */
887 tcg_gen_movi_i32(t0, 0);
888 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
889
890 tcg_temp_free(t0);
891 tcg_temp_free(t1);
892 tcg_temp_free(t2);
893 }
894 return;
895 case 0x3008: /* sub Rm,Rn */
896 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
897 return;
898 case 0x300a: /* subc Rm,Rn */
899 {
900 TCGv t0, t1;
901 t0 = tcg_const_tl(0);
902 t1 = tcg_temp_new();
903 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
904 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
905 REG(B11_8), t0, t1, cpu_sr_t);
906 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
907 tcg_temp_free(t0);
908 tcg_temp_free(t1);
909 }
910 return;
911 case 0x300b: /* subv Rm,Rn */
912 {
913 TCGv t0, t1, t2;
914 t0 = tcg_temp_new();
915 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
916 t1 = tcg_temp_new();
917 tcg_gen_xor_i32(t1, t0, REG(B7_4));
918 t2 = tcg_temp_new();
919 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
920 tcg_gen_and_i32(t1, t1, t2);
921 tcg_temp_free(t2);
922 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
923 tcg_temp_free(t1);
924 tcg_gen_mov_i32(REG(B11_8), t0);
925 tcg_temp_free(t0);
926 }
927 return;
928 case 0x2008: /* tst Rm,Rn */
929 {
930 TCGv val = tcg_temp_new();
931 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
932 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
933 tcg_temp_free(val);
934 }
935 return;
936 case 0x200a: /* xor Rm,Rn */
937 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
938 return;
939 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
940 CHECK_FPU_ENABLED
941 if (ctx->flags & FPSCR_SZ) {
942 TCGv_i64 fp = tcg_temp_new_i64();
943 gen_load_fpr64(fp, XREG(B7_4));
944 gen_store_fpr64(fp, XREG(B11_8));
945 tcg_temp_free_i64(fp);
946 } else {
947 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
948 }
949 return;
950 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
951 CHECK_FPU_ENABLED
952 if (ctx->flags & FPSCR_SZ) {
953 TCGv addr_hi = tcg_temp_new();
954 int fr = XREG(B7_4);
955 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
956 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
957 ctx->memidx, MO_TEUL);
958 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
959 ctx->memidx, MO_TEUL);
960 tcg_temp_free(addr_hi);
961 } else {
962 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
963 ctx->memidx, MO_TEUL);
964 }
965 return;
966 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
967 CHECK_FPU_ENABLED
968 if (ctx->flags & FPSCR_SZ) {
969 TCGv addr_hi = tcg_temp_new();
970 int fr = XREG(B11_8);
971 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
972 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
973 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
974 tcg_temp_free(addr_hi);
975 } else {
976 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
977 ctx->memidx, MO_TEUL);
978 }
979 return;
980 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
981 CHECK_FPU_ENABLED
982 if (ctx->flags & FPSCR_SZ) {
983 TCGv addr_hi = tcg_temp_new();
984 int fr = XREG(B11_8);
985 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
986 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
987 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
988 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
989 tcg_temp_free(addr_hi);
990 } else {
991 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
992 ctx->memidx, MO_TEUL);
993 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
994 }
995 return;
996 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
997 CHECK_FPU_ENABLED
998 TCGv addr = tcg_temp_new_i32();
999 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1000 if (ctx->flags & FPSCR_SZ) {
1001 int fr = XREG(B7_4);
1002 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1003 tcg_gen_subi_i32(addr, addr, 4);
1004 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1005 } else {
1006 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1007 ctx->memidx, MO_TEUL);
1008 }
1009 tcg_gen_mov_i32(REG(B11_8), addr);
1010 tcg_temp_free(addr);
1011 return;
1012 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1013 CHECK_FPU_ENABLED
1014 {
1015 TCGv addr = tcg_temp_new_i32();
1016 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1017 if (ctx->flags & FPSCR_SZ) {
1018 int fr = XREG(B11_8);
1019 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1020 ctx->memidx, MO_TEUL);
1021 tcg_gen_addi_i32(addr, addr, 4);
1022 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1023 ctx->memidx, MO_TEUL);
1024 } else {
1025 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1026 ctx->memidx, MO_TEUL);
1027 }
1028 tcg_temp_free(addr);
1029 }
1030 return;
1031 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1032 CHECK_FPU_ENABLED
1033 {
1034 TCGv addr = tcg_temp_new();
1035 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1036 if (ctx->flags & FPSCR_SZ) {
1037 int fr = XREG(B7_4);
1038 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1039 ctx->memidx, MO_TEUL);
1040 tcg_gen_addi_i32(addr, addr, 4);
1041 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1042 ctx->memidx, MO_TEUL);
1043 } else {
1044 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1045 ctx->memidx, MO_TEUL);
1046 }
1047 tcg_temp_free(addr);
1048 }
1049 return;
1050 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1051 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1052 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1055 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1056 {
1057 CHECK_FPU_ENABLED
1058 if (ctx->flags & FPSCR_PR) {
1059 TCGv_i64 fp0, fp1;
1060
1061 if (ctx->opcode & 0x0110)
1062 break; /* illegal instruction */
1063 fp0 = tcg_temp_new_i64();
1064 fp1 = tcg_temp_new_i64();
1065 gen_load_fpr64(fp0, DREG(B11_8));
1066 gen_load_fpr64(fp1, DREG(B7_4));
1067 switch (ctx->opcode & 0xf00f) {
1068 case 0xf000: /* fadd Rm,Rn */
1069 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1070 break;
1071 case 0xf001: /* fsub Rm,Rn */
1072 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1073 break;
1074 case 0xf002: /* fmul Rm,Rn */
1075 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1076 break;
1077 case 0xf003: /* fdiv Rm,Rn */
1078 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1079 break;
1080 case 0xf004: /* fcmp/eq Rm,Rn */
1081 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1082 return;
1083 case 0xf005: /* fcmp/gt Rm,Rn */
1084 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1085 return;
1086 }
1087 gen_store_fpr64(fp0, DREG(B11_8));
1088 tcg_temp_free_i64(fp0);
1089 tcg_temp_free_i64(fp1);
1090 } else {
1091 switch (ctx->opcode & 0xf00f) {
1092 case 0xf000: /* fadd Rm,Rn */
1093 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1094 cpu_fregs[FREG(B11_8)],
1095 cpu_fregs[FREG(B7_4)]);
1096 break;
1097 case 0xf001: /* fsub Rm,Rn */
1098 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1099 cpu_fregs[FREG(B11_8)],
1100 cpu_fregs[FREG(B7_4)]);
1101 break;
1102 case 0xf002: /* fmul Rm,Rn */
1103 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1104 cpu_fregs[FREG(B11_8)],
1105 cpu_fregs[FREG(B7_4)]);
1106 break;
1107 case 0xf003: /* fdiv Rm,Rn */
1108 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1109 cpu_fregs[FREG(B11_8)],
1110 cpu_fregs[FREG(B7_4)]);
1111 break;
1112 case 0xf004: /* fcmp/eq Rm,Rn */
1113 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1114 cpu_fregs[FREG(B7_4)]);
1115 return;
1116 case 0xf005: /* fcmp/gt Rm,Rn */
1117 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1118 cpu_fregs[FREG(B7_4)]);
1119 return;
1120 }
1121 }
1122 }
1123 return;
1124 case 0xf00e: /* fmac FR0,RM,Rn */
1125 {
1126 CHECK_FPU_ENABLED
1127 if (ctx->flags & FPSCR_PR) {
1128 break; /* illegal instruction */
1129 } else {
1130 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1131 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1132 cpu_fregs[FREG(B11_8)]);
1133 return;
1134 }
1135 }
1136 }
1137
1138 switch (ctx->opcode & 0xff00) {
1139 case 0xc900: /* and #imm,R0 */
1140 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1141 return;
1142 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1143 {
1144 TCGv addr, val;
1145 addr = tcg_temp_new();
1146 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1147 val = tcg_temp_new();
1148 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1149 tcg_gen_andi_i32(val, val, B7_0);
1150 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1151 tcg_temp_free(val);
1152 tcg_temp_free(addr);
1153 }
1154 return;
1155 case 0x8b00: /* bf label */
1156 CHECK_NOT_DELAY_SLOT
1157 gen_conditional_jump(ctx, ctx->pc + 2,
1158 ctx->pc + 4 + B7_0s * 2);
1159 ctx->bstate = BS_BRANCH;
1160 return;
1161 case 0x8f00: /* bf/s label */
1162 CHECK_NOT_DELAY_SLOT
1163 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1164 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1165 return;
1166 case 0x8900: /* bt label */
1167 CHECK_NOT_DELAY_SLOT
1168 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1169 ctx->pc + 2);
1170 ctx->bstate = BS_BRANCH;
1171 return;
1172 case 0x8d00: /* bt/s label */
1173 CHECK_NOT_DELAY_SLOT
1174 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1175 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1176 return;
1177 case 0x8800: /* cmp/eq #imm,R0 */
1178 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1179 return;
1180 case 0xc400: /* mov.b @(disp,GBR),R0 */
1181 {
1182 TCGv addr = tcg_temp_new();
1183 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1184 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1185 tcg_temp_free(addr);
1186 }
1187 return;
1188 case 0xc500: /* mov.w @(disp,GBR),R0 */
1189 {
1190 TCGv addr = tcg_temp_new();
1191 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1192 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1193 tcg_temp_free(addr);
1194 }
1195 return;
1196 case 0xc600: /* mov.l @(disp,GBR),R0 */
1197 {
1198 TCGv addr = tcg_temp_new();
1199 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1200 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1201 tcg_temp_free(addr);
1202 }
1203 return;
1204 case 0xc000: /* mov.b R0,@(disp,GBR) */
1205 {
1206 TCGv addr = tcg_temp_new();
1207 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1208 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1209 tcg_temp_free(addr);
1210 }
1211 return;
1212 case 0xc100: /* mov.w R0,@(disp,GBR) */
1213 {
1214 TCGv addr = tcg_temp_new();
1215 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1216 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1217 tcg_temp_free(addr);
1218 }
1219 return;
1220 case 0xc200: /* mov.l R0,@(disp,GBR) */
1221 {
1222 TCGv addr = tcg_temp_new();
1223 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1224 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1225 tcg_temp_free(addr);
1226 }
1227 return;
1228 case 0x8000: /* mov.b R0,@(disp,Rn) */
1229 {
1230 TCGv addr = tcg_temp_new();
1231 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1232 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1233 tcg_temp_free(addr);
1234 }
1235 return;
1236 case 0x8100: /* mov.w R0,@(disp,Rn) */
1237 {
1238 TCGv addr = tcg_temp_new();
1239 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1240 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1241 tcg_temp_free(addr);
1242 }
1243 return;
1244 case 0x8400: /* mov.b @(disp,Rn),R0 */
1245 {
1246 TCGv addr = tcg_temp_new();
1247 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1248 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1249 tcg_temp_free(addr);
1250 }
1251 return;
1252 case 0x8500: /* mov.w @(disp,Rn),R0 */
1253 {
1254 TCGv addr = tcg_temp_new();
1255 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1256 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1257 tcg_temp_free(addr);
1258 }
1259 return;
1260 case 0xc700: /* mova @(disp,PC),R0 */
1261 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1262 return;
1263 case 0xcb00: /* or #imm,R0 */
1264 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1265 return;
1266 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1267 {
1268 TCGv addr, val;
1269 addr = tcg_temp_new();
1270 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1271 val = tcg_temp_new();
1272 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1273 tcg_gen_ori_i32(val, val, B7_0);
1274 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1275 tcg_temp_free(val);
1276 tcg_temp_free(addr);
1277 }
1278 return;
1279 case 0xc300: /* trapa #imm */
1280 {
1281 TCGv imm;
1282 CHECK_NOT_DELAY_SLOT
1283 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1284 imm = tcg_const_i32(B7_0);
1285 gen_helper_trapa(cpu_env, imm);
1286 tcg_temp_free(imm);
1287 ctx->bstate = BS_BRANCH;
1288 }
1289 return;
1290 case 0xc800: /* tst #imm,R0 */
1291 {
1292 TCGv val = tcg_temp_new();
1293 tcg_gen_andi_i32(val, REG(0), B7_0);
1294 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1295 tcg_temp_free(val);
1296 }
1297 return;
1298 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1299 {
1300 TCGv val = tcg_temp_new();
1301 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1302 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1303 tcg_gen_andi_i32(val, val, B7_0);
1304 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1305 tcg_temp_free(val);
1306 }
1307 return;
1308 case 0xca00: /* xor #imm,R0 */
1309 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1310 return;
1311 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1312 {
1313 TCGv addr, val;
1314 addr = tcg_temp_new();
1315 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1316 val = tcg_temp_new();
1317 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1318 tcg_gen_xori_i32(val, val, B7_0);
1319 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1320 tcg_temp_free(val);
1321 tcg_temp_free(addr);
1322 }
1323 return;
1324 }
1325
1326 switch (ctx->opcode & 0xf08f) {
1327 case 0x408e: /* ldc Rm,Rn_BANK */
1328 CHECK_PRIVILEGED
1329 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1330 return;
1331 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1332 CHECK_PRIVILEGED
1333 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1334 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1335 return;
1336 case 0x0082: /* stc Rm_BANK,Rn */
1337 CHECK_PRIVILEGED
1338 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1339 return;
1340 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1341 CHECK_PRIVILEGED
1342 {
1343 TCGv addr = tcg_temp_new();
1344 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1345 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1346 tcg_gen_mov_i32(REG(B11_8), addr);
1347 tcg_temp_free(addr);
1348 }
1349 return;
1350 }
1351
1352 switch (ctx->opcode & 0xf0ff) {
1353 case 0x0023: /* braf Rn */
1354 CHECK_NOT_DELAY_SLOT
1355 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1356 ctx->flags |= DELAY_SLOT;
1357 ctx->delayed_pc = (uint32_t) - 1;
1358 return;
1359 case 0x0003: /* bsrf Rn */
1360 CHECK_NOT_DELAY_SLOT
1361 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1362 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1363 ctx->flags |= DELAY_SLOT;
1364 ctx->delayed_pc = (uint32_t) - 1;
1365 return;
1366 case 0x4015: /* cmp/pl Rn */
1367 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1368 return;
1369 case 0x4011: /* cmp/pz Rn */
1370 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1371 return;
1372 case 0x4010: /* dt Rn */
1373 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1374 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1375 return;
1376 case 0x402b: /* jmp @Rn */
1377 CHECK_NOT_DELAY_SLOT
1378 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1379 ctx->flags |= DELAY_SLOT;
1380 ctx->delayed_pc = (uint32_t) - 1;
1381 return;
1382 case 0x400b: /* jsr @Rn */
1383 CHECK_NOT_DELAY_SLOT
1384 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1385 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1386 ctx->flags |= DELAY_SLOT;
1387 ctx->delayed_pc = (uint32_t) - 1;
1388 return;
1389 case 0x400e: /* ldc Rm,SR */
1390 CHECK_PRIVILEGED
1391 {
1392 TCGv val = tcg_temp_new();
1393 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1394 gen_write_sr(val);
1395 tcg_temp_free(val);
1396 ctx->bstate = BS_STOP;
1397 }
1398 return;
1399 case 0x4007: /* ldc.l @Rm+,SR */
1400 CHECK_PRIVILEGED
1401 {
1402 TCGv val = tcg_temp_new();
1403 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1404 tcg_gen_andi_i32(val, val, 0x700083f3);
1405 gen_write_sr(val);
1406 tcg_temp_free(val);
1407 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1408 ctx->bstate = BS_STOP;
1409 }
1410 return;
1411 case 0x0002: /* stc SR,Rn */
1412 CHECK_PRIVILEGED
1413 gen_read_sr(REG(B11_8));
1414 return;
1415 case 0x4003: /* stc SR,@-Rn */
1416 CHECK_PRIVILEGED
1417 {
1418 TCGv addr = tcg_temp_new();
1419 TCGv val = tcg_temp_new();
1420 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1421 gen_read_sr(val);
1422 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1423 tcg_gen_mov_i32(REG(B11_8), addr);
1424 tcg_temp_free(val);
1425 tcg_temp_free(addr);
1426 }
1427 return;
1428 #define LD(reg,ldnum,ldpnum,prechk) \
1429 case ldnum: \
1430 prechk \
1431 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1432 return; \
1433 case ldpnum: \
1434 prechk \
1435 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1436 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1437 return;
1438 #define ST(reg,stnum,stpnum,prechk) \
1439 case stnum: \
1440 prechk \
1441 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1442 return; \
1443 case stpnum: \
1444 prechk \
1445 { \
1446 TCGv addr = tcg_temp_new(); \
1447 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1448 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1449 tcg_gen_mov_i32(REG(B11_8), addr); \
1450 tcg_temp_free(addr); \
1451 } \
1452 return;
1453 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1454 LD(reg,ldnum,ldpnum,prechk) \
1455 ST(reg,stnum,stpnum,prechk)
1456 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1457 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1458 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1459 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1460 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1461 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1462 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1463 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1464 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1465 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1466 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1467 case 0x406a: /* lds Rm,FPSCR */
1468 CHECK_FPU_ENABLED
1469 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1470 ctx->bstate = BS_STOP;
1471 return;
1472 case 0x4066: /* lds.l @Rm+,FPSCR */
1473 CHECK_FPU_ENABLED
1474 {
1475 TCGv addr = tcg_temp_new();
1476 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1477 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1478 gen_helper_ld_fpscr(cpu_env, addr);
1479 tcg_temp_free(addr);
1480 ctx->bstate = BS_STOP;
1481 }
1482 return;
1483 case 0x006a: /* sts FPSCR,Rn */
1484 CHECK_FPU_ENABLED
1485 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1486 return;
1487 case 0x4062: /* sts FPSCR,@-Rn */
1488 CHECK_FPU_ENABLED
1489 {
1490 TCGv addr, val;
1491 val = tcg_temp_new();
1492 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1493 addr = tcg_temp_new();
1494 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1495 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1496 tcg_gen_mov_i32(REG(B11_8), addr);
1497 tcg_temp_free(addr);
1498 tcg_temp_free(val);
1499 }
1500 return;
1501 case 0x00c3: /* movca.l R0,@Rm */
1502 {
1503 TCGv val = tcg_temp_new();
1504 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1505 gen_helper_movcal(cpu_env, REG(B11_8), val);
1506 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1507 }
1508 ctx->has_movcal = 1;
1509 return;
1510 case 0x40a9:
1511 /* MOVUA.L @Rm,R0 (Rm) -> R0
1512 Load non-boundary-aligned data */
1513 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1514 return;
1515 case 0x40e9:
1516 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1517 Load non-boundary-aligned data */
1518 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1519 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1520 return;
1521 case 0x0029: /* movt Rn */
1522 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1523 return;
1524 case 0x0073:
1525 /* MOVCO.L
1526 LDST -> T
1527 If (T == 1) R0 -> (Rn)
1528 0 -> LDST
1529 */
1530 if (ctx->features & SH_FEATURE_SH4A) {
1531 TCGLabel *label = gen_new_label();
1532 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1533 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1534 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1535 gen_set_label(label);
1536 tcg_gen_movi_i32(cpu_ldst, 0);
1537 return;
1538 } else
1539 break;
1540 case 0x0063:
1541 /* MOVLI.L @Rm,R0
1542 1 -> LDST
1543 (Rm) -> R0
1544 When interrupt/exception
1545 occurred 0 -> LDST
1546 */
1547 if (ctx->features & SH_FEATURE_SH4A) {
1548 tcg_gen_movi_i32(cpu_ldst, 0);
1549 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1550 tcg_gen_movi_i32(cpu_ldst, 1);
1551 return;
1552 } else
1553 break;
1554 case 0x0093: /* ocbi @Rn */
1555 {
1556 gen_helper_ocbi(cpu_env, REG(B11_8));
1557 }
1558 return;
1559 case 0x00a3: /* ocbp @Rn */
1560 case 0x00b3: /* ocbwb @Rn */
1561 /* These instructions are supposed to do nothing in case of
1562 a cache miss. Given that we only partially emulate caches
1563 it is safe to simply ignore them. */
1564 return;
1565 case 0x0083: /* pref @Rn */
1566 return;
1567 case 0x00d3: /* prefi @Rn */
1568 if (ctx->features & SH_FEATURE_SH4A)
1569 return;
1570 else
1571 break;
1572 case 0x00e3: /* icbi @Rn */
1573 if (ctx->features & SH_FEATURE_SH4A)
1574 return;
1575 else
1576 break;
1577 case 0x00ab: /* synco */
1578 if (ctx->features & SH_FEATURE_SH4A)
1579 return;
1580 else
1581 break;
1582 case 0x4024: /* rotcl Rn */
1583 {
1584 TCGv tmp = tcg_temp_new();
1585 tcg_gen_mov_i32(tmp, cpu_sr_t);
1586 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1587 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1588 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1589 tcg_temp_free(tmp);
1590 }
1591 return;
1592 case 0x4025: /* rotcr Rn */
1593 {
1594 TCGv tmp = tcg_temp_new();
1595 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1596 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1597 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1598 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1599 tcg_temp_free(tmp);
1600 }
1601 return;
1602 case 0x4004: /* rotl Rn */
1603 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1604 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1605 return;
1606 case 0x4005: /* rotr Rn */
1607 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1608 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1609 return;
1610 case 0x4000: /* shll Rn */
1611 case 0x4020: /* shal Rn */
1612 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1613 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1614 return;
1615 case 0x4021: /* shar Rn */
1616 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1617 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1618 return;
1619 case 0x4001: /* shlr Rn */
1620 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1621 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1622 return;
1623 case 0x4008: /* shll2 Rn */
1624 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1625 return;
1626 case 0x4018: /* shll8 Rn */
1627 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1628 return;
1629 case 0x4028: /* shll16 Rn */
1630 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1631 return;
1632 case 0x4009: /* shlr2 Rn */
1633 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1634 return;
1635 case 0x4019: /* shlr8 Rn */
1636 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1637 return;
1638 case 0x4029: /* shlr16 Rn */
1639 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1640 return;
1641 case 0x401b: /* tas.b @Rn */
1642 {
1643 TCGv addr, val;
1644 addr = tcg_temp_local_new();
1645 tcg_gen_mov_i32(addr, REG(B11_8));
1646 val = tcg_temp_local_new();
1647 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1648 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1649 tcg_gen_ori_i32(val, val, 0x80);
1650 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1651 tcg_temp_free(val);
1652 tcg_temp_free(addr);
1653 }
1654 return;
1655 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1656 CHECK_FPU_ENABLED
1657 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1658 return;
1659 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1660 CHECK_FPU_ENABLED
1661 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1662 return;
1663 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1664 CHECK_FPU_ENABLED
1665 if (ctx->flags & FPSCR_PR) {
1666 TCGv_i64 fp;
1667 if (ctx->opcode & 0x0100)
1668 break; /* illegal instruction */
1669 fp = tcg_temp_new_i64();
1670 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1671 gen_store_fpr64(fp, DREG(B11_8));
1672 tcg_temp_free_i64(fp);
1673 }
1674 else {
1675 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1676 }
1677 return;
1678 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1679 CHECK_FPU_ENABLED
1680 if (ctx->flags & FPSCR_PR) {
1681 TCGv_i64 fp;
1682 if (ctx->opcode & 0x0100)
1683 break; /* illegal instruction */
1684 fp = tcg_temp_new_i64();
1685 gen_load_fpr64(fp, DREG(B11_8));
1686 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1687 tcg_temp_free_i64(fp);
1688 }
1689 else {
1690 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1691 }
1692 return;
1693 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1694 CHECK_FPU_ENABLED
1695 {
1696 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1697 }
1698 return;
1699 case 0xf05d: /* fabs FRn/DRn */
1700 CHECK_FPU_ENABLED
1701 if (ctx->flags & FPSCR_PR) {
1702 if (ctx->opcode & 0x0100)
1703 break; /* illegal instruction */
1704 TCGv_i64 fp = tcg_temp_new_i64();
1705 gen_load_fpr64(fp, DREG(B11_8));
1706 gen_helper_fabs_DT(fp, fp);
1707 gen_store_fpr64(fp, DREG(B11_8));
1708 tcg_temp_free_i64(fp);
1709 } else {
1710 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1711 }
1712 return;
1713 case 0xf06d: /* fsqrt FRn */
1714 CHECK_FPU_ENABLED
1715 if (ctx->flags & FPSCR_PR) {
1716 if (ctx->opcode & 0x0100)
1717 break; /* illegal instruction */
1718 TCGv_i64 fp = tcg_temp_new_i64();
1719 gen_load_fpr64(fp, DREG(B11_8));
1720 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1721 gen_store_fpr64(fp, DREG(B11_8));
1722 tcg_temp_free_i64(fp);
1723 } else {
1724 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1725 cpu_fregs[FREG(B11_8)]);
1726 }
1727 return;
1728 case 0xf07d: /* fsrra FRn */
1729 CHECK_FPU_ENABLED
1730 break;
1731 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1732 CHECK_FPU_ENABLED
1733 if (!(ctx->flags & FPSCR_PR)) {
1734 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1735 }
1736 return;
1737 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1738 CHECK_FPU_ENABLED
1739 if (!(ctx->flags & FPSCR_PR)) {
1740 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1741 }
1742 return;
1743 case 0xf0ad: /* fcnvsd FPUL,DRn */
1744 CHECK_FPU_ENABLED
1745 {
1746 TCGv_i64 fp = tcg_temp_new_i64();
1747 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1748 gen_store_fpr64(fp, DREG(B11_8));
1749 tcg_temp_free_i64(fp);
1750 }
1751 return;
1752 case 0xf0bd: /* fcnvds DRn,FPUL */
1753 CHECK_FPU_ENABLED
1754 {
1755 TCGv_i64 fp = tcg_temp_new_i64();
1756 gen_load_fpr64(fp, DREG(B11_8));
1757 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1758 tcg_temp_free_i64(fp);
1759 }
1760 return;
1761 case 0xf0ed: /* fipr FVm,FVn */
1762 CHECK_FPU_ENABLED
1763 if ((ctx->flags & FPSCR_PR) == 0) {
1764 TCGv m, n;
1765 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1766 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1767 gen_helper_fipr(cpu_env, m, n);
1768 tcg_temp_free(m);
1769 tcg_temp_free(n);
1770 return;
1771 }
1772 break;
1773 case 0xf0fd: /* ftrv XMTRX,FVn */
1774 CHECK_FPU_ENABLED
1775 if ((ctx->opcode & 0x0300) == 0x0100 &&
1776 (ctx->flags & FPSCR_PR) == 0) {
1777 TCGv n;
1778 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1779 gen_helper_ftrv(cpu_env, n);
1780 tcg_temp_free(n);
1781 return;
1782 }
1783 break;
1784 }
1785 #if 0
1786 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1787 ctx->opcode, ctx->pc);
1788 fflush(stderr);
1789 #endif
1790 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1791 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1792 gen_helper_raise_slot_illegal_instruction(cpu_env);
1793 } else {
1794 gen_helper_raise_illegal_instruction(cpu_env);
1795 }
1796 ctx->bstate = BS_BRANCH;
1797 }
1798
1799 static void decode_opc(DisasContext * ctx)
1800 {
1801 uint32_t old_flags = ctx->flags;
1802
1803 _decode_opc(ctx);
1804
1805 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1806 if (ctx->flags & DELAY_SLOT_CLEARME) {
1807 gen_store_flags(0);
1808 } else {
1809 /* go out of the delay slot */
1810 uint32_t new_flags = ctx->flags;
1811 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1812 gen_store_flags(new_flags);
1813 }
1814 ctx->flags = 0;
1815 ctx->bstate = BS_BRANCH;
1816 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1817 gen_delayed_conditional_jump(ctx);
1818 } else if (old_flags & DELAY_SLOT) {
1819 gen_jump(ctx);
1820 }
1821
1822 }
1823
1824 /* go into a delay slot */
1825 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1826 gen_store_flags(ctx->flags);
1827 }
1828
/* Translate a block of guest code starting at tb->pc into TCG ops.
   Translation stops at the first branch/exception (ctx.bstate change),
   at a page boundary, when the TCG op buffer fills, after max_insns
   instructions, or in single-step mode after one instruction. */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* memidx 1 when SR.MD is clear (user mode), 0 when privileged. */
    ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    /* Honour the icount budget in tb->cflags; clamp to the TCG limit. */
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
        /* Record (pc, flags) so restore_state_to_opc() can rebuild them. */
        tcg_gen_insn_start(ctx.pc, ctx.flags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date */
            tcg_gen_movi_i32(cpu_pc, ctx.pc);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_BRANCH;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            ctx.pc += 2;
            break;
        }

        /* The last insn of an icount TB may do I/O; bracket it. */
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* SH4 instructions are fixed 16-bit. */
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
        /* Never let a TB cross a page boundary. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        /* Single-stepping: report current PC to the debug helper. */
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            /* TB ended without a branch: persist flags and chain to
               the next TB at ctx.pc. */
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch already emitted its own TB exit. */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n");	/* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
1934
/* Restore CPU state from the per-insn data recorded at translation time.
   The layout of data[] matches the tcg_gen_insn_start(ctx.pc, ctx.flags)
   call in gen_intermediate_code: data[0] is the pc, data[1] the flags. */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
}