docs/qdev-device-use: Don't suggest -drive and -net can do USB
[qemu.git] / target / m68k / translate.c
1 /*
2 * m68k translation
3 *
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33
34 #include "trace-tcg.h"
35 #include "exec/log.h"
36 #include "fpu/softfloat.h"
37
38
39 //#define DEBUG_DISPATCH 1
40
/* Declare one static TCG global per register listed in qregs.def. */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64

/* TCG views of CPUState fields (reached via negative offsets from env). */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing storage for register names: "Dn"/"An" need 3 bytes, "ACCn" 5. */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];       /* data registers D0-D7 */
static TCGv cpu_aregs[8];       /* address registers A0-A7 */
static TCGv_i64 cpu_macc[4];    /* MAC accumulators */

/* Extract the 3-bit register field of INSN at bit position POS. */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel returned by EA helpers to signal an invalid addressing mode. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;

#include "exec/gen-icount.h"
67
/*
 * Allocate the fixed TCG globals used by the translator: the qregs.def
 * entries, the CPUState halted/exception fields, and the D/A/ACC
 * register files.  Called once at CPU class init.
 */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Instantiate the globals declared above from the same qregs.def list. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    /*
     * halted/exception_index live in CPUState, which precedes env inside
     * M68kCPU — hence the negative offset relative to cpu_env.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Carve "Dn"/"An" names out of cpu_reg_names, 3 bytes per name. */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    /* "ACCn" names take 5 bytes each. */
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinel globals; their (negative) offsets are never dereferenced. */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
112
/* internal defines */

/* Per-translation disassembly state for one instruction stream. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next instruction word to fetch */
    CCOp cc_op;             /* Current CC operation */
    int cc_op_synced;       /* nonzero if env's CC_OP matches cc_op */
    TCGv_i64 mactmp;        /* scratch temp for MAC instructions */
    int done_mac;
    int writeback_mask;     /* bitmask of An with a delayed writeback pending */
    TCGv writeback[8];      /* pending value for each address register */
#define MAX_TO_RELEASE 8
    int release_count;      /* number of temps queued for freeing */
    TCGv release[MAX_TO_RELEASE];
} DisasContext;
128
129 static void init_release_array(DisasContext *s)
130 {
131 #ifdef CONFIG_DEBUG_TCG
132 memset(s->release, 0, sizeof(s->release));
133 #endif
134 s->release_count = 0;
135 }
136
137 static void do_release(DisasContext *s)
138 {
139 int i;
140 for (i = 0; i < s->release_count; i++) {
141 tcg_temp_free(s->release[i]);
142 }
143 init_release_array(s);
144 }
145
146 static TCGv mark_to_release(DisasContext *s, TCGv tmp)
147 {
148 g_assert(s->release_count < MAX_TO_RELEASE);
149 return s->release[s->release_count++] = tmp;
150 }
151
152 static TCGv get_areg(DisasContext *s, unsigned regno)
153 {
154 if (s->writeback_mask & (1 << regno)) {
155 return s->writeback[regno];
156 } else {
157 return cpu_aregs[regno];
158 }
159 }
160
/*
 * Queue a write to address register REGNO, committed later by
 * do_writebacks() once the instruction is known to complete.
 * If GIVE_TEMP, ownership of VAL (a temp) transfers to the context;
 * otherwise VAL is copied into a context-owned temp.
 */
static void delay_set_areg(DisasContext *s, unsigned regno,
                           TCGv val, bool give_temp)
{
    if (s->writeback_mask & (1 << regno)) {
        /* A writeback is already pending: replace or overwrite it. */
        if (give_temp) {
            tcg_temp_free(s->writeback[regno]);
            s->writeback[regno] = val;
        } else {
            tcg_gen_mov_i32(s->writeback[regno], val);
        }
    } else {
        s->writeback_mask |= 1 << regno;
        if (give_temp) {
            s->writeback[regno] = val;
        } else {
            TCGv tmp = tcg_temp_new();
            s->writeback[regno] = tmp;
            tcg_gen_mov_i32(tmp, val);
        }
    }
}
182
183 static void do_writebacks(DisasContext *s)
184 {
185 unsigned mask = s->writeback_mask;
186 if (mask) {
187 s->writeback_mask = 0;
188 do {
189 unsigned regno = ctz32(mask);
190 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
191 tcg_temp_free(s->writeback[regno]);
192 mask &= mask - 1;
193 } while (mask);
194 }
195 }
196
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
/* MMU index selected by the SFC/DFC supervisor bits cached in tb->flags. */
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature shared by every per-instruction disassembly routine. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug build: log each dispatch, then forward to the real handler. */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
230
/*
 * For each CC_OP_* state, the set of flag bits that hold live data
 * before the flags are flushed.  set_cc_op() discards the rest.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
239
/*
 * Switch the lazily-tracked CC computation to OP, discarding any
 * partially-computed flag values the new operation does not need.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;    /* env copy of CC_OP is now stale */

    /*
     * Discard CC computation that will no longer be used.
     * Note that X and N are never dead.
     */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
266
267 /* Update the CPU env CC_OP state. */
268 static void update_cc_op(DisasContext *s)
269 {
270 if (!s->cc_op_synced) {
271 s->cc_op_synced = 1;
272 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
273 }
274 }
275
/* Generate a jump to an immediate address. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Flags must be synced before control leaves the TB. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
283
/* Generate a jump to the address in qreg DEST. */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Flags must be synced before control leaves the TB. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
291
/* Emit a call to the raise_exception helper with exception number NR. */
static void gen_raise_exception(int nr)
{
    TCGv_i32 tmp;

    tmp = tcg_const_i32(nr);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
300
/*
 * Raise exception NR with the PC set to DEST, and terminate the TB
 * (the helper does not return to generated code).
 */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
310
/* Raise an address-error exception at the current instruction. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
315
316 /*
317 * Generate a load from the specified address. Narrow values are
318 * sign extended to full register width.
319 */
320 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
321 int sign, int index)
322 {
323 TCGv tmp;
324 tmp = tcg_temp_new_i32();
325 switch(opsize) {
326 case OS_BYTE:
327 if (sign)
328 tcg_gen_qemu_ld8s(tmp, addr, index);
329 else
330 tcg_gen_qemu_ld8u(tmp, addr, index);
331 break;
332 case OS_WORD:
333 if (sign)
334 tcg_gen_qemu_ld16s(tmp, addr, index);
335 else
336 tcg_gen_qemu_ld16u(tmp, addr, index);
337 break;
338 case OS_LONG:
339 tcg_gen_qemu_ld32u(tmp, addr, index);
340 break;
341 default:
342 g_assert_not_reached();
343 }
344 return tmp;
345 }
346
347 /* Generate a store. */
348 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
349 int index)
350 {
351 switch(opsize) {
352 case OS_BYTE:
353 tcg_gen_qemu_st8(val, addr, index);
354 break;
355 case OS_WORD:
356 tcg_gen_qemu_st16(val, addr, index);
357 break;
358 case OS_LONG:
359 tcg_gen_qemu_st32(val, addr, index);
360 break;
361 default:
362 g_assert_not_reached();
363 }
364 }
365
/* Direction/signedness selector for the EA load/store helpers. */
typedef enum {
    EA_STORE,   /* write the supplied value to the EA */
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;
371
372 /*
373 * Generate an unsigned load if VAL is 0 a signed load if val is -1,
374 * otherwise generate a store.
375 */
376 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
377 ea_what what, int index)
378 {
379 if (what == EA_STORE) {
380 gen_store(s, opsize, addr, val, index);
381 return store_dummy;
382 } else {
383 return mark_to_release(s, gen_load(s, opsize, addr,
384 what == EA_LOADS, index));
385 }
386 }
387
388 /* Read a 16-bit immediate constant */
389 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
390 {
391 uint16_t im;
392 im = translator_lduw(env, s->pc);
393 s->pc += 2;
394 return im;
395 }
396
/*
 * Read an 8-bit immediate constant.  Byte immediates occupy a full
 * extension word in the instruction stream, so a 16-bit fetch is done
 * and the result truncated to 8 bits by the return type.
 */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}
402
403 /* Read a 32-bit immediate constant. */
404 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
405 {
406 uint32_t im;
407 im = read_im16(env, s) << 16;
408 im |= 0xffff & read_im16(env, s);
409 return im;
410 }
411
412 /* Read a 64-bit immediate constant. */
413 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
414 {
415 uint64_t im;
416 im = (uint64_t)read_im32(env, s) << 32;
417 im |= (uint64_t)read_im32(env, s);
418 return im;
419 }
420
/* Calculate an address index from an extension word. */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects An vs Dn as the index register (bits 14-12). */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: sign-extend to 32 bits. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    /* Bits 10-9: scale factor, applied as a left shift. */
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
439
/*
 * Handle a base + index + displacement effective address.
 * A NULL_QREG base means pc-relative.
 */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    offset = s->pc;     /* pc value used for pc-relative addressing */
    ext = read_im16(env, s);

    /* A word-sized index requires the WORD_INDEX feature. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* No scaled indexing on this core: force the scale field to 0. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = mark_to_release(s, tcg_temp_new());
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            /* index suppressed, or applied after the memory fetch below */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold pc + bd into one constant */
                base = mark_to_release(s, tcg_const_i32(offset + bd));
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* everything suppressed: address is just the displacement */
            add = mark_to_release(s, tcg_const_i32(bd));
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
            if ((ext & 0x44) == 4) {
                /* post-index: add the index after the memory fetch */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = mark_to_release(s, tcg_temp_new());
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative: fold pc + 8-bit displacement into one addi */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
546
547 /* Sign or zero extend a value. */
548
549 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
550 {
551 switch (opsize) {
552 case OS_BYTE:
553 if (sign) {
554 tcg_gen_ext8s_i32(res, val);
555 } else {
556 tcg_gen_ext8u_i32(res, val);
557 }
558 break;
559 case OS_WORD:
560 if (sign) {
561 tcg_gen_ext16s_i32(res, val);
562 } else {
563 tcg_gen_ext16u_i32(res, val);
564 }
565 break;
566 case OS_LONG:
567 tcg_gen_mov_i32(res, val);
568 break;
569 default:
570 g_assert_not_reached();
571 }
572 }
573
/* Evaluate all the CC flags. */

/*
 * Materialise C, V, Z, N and X into the QREG_CC_* globals from the lazy
 * representation selected by s->cc_op, then switch to CC_OP_FLAGS.
 */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already fully materialised. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* N holds the result, V the second operand, X the carry. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* N holds the first operand, V the second; derive all flags. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* N holds the result; C and V are always clear. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* CC_OP is live in env; let the helper decide at run time. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
655
656 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
657 {
658 TCGv tmp;
659
660 if (opsize == OS_LONG) {
661 tmp = val;
662 } else {
663 tmp = mark_to_release(s, tcg_temp_new());
664 gen_ext(tmp, val, opsize, sign);
665 }
666
667 return tmp;
668 }
669
/* Set the flags for a logical operation: N gets the sign-extended result. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
675
/* Record a comparison lazily: N = first operand, V = second operand. */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
682
/*
 * Record an add/sub lazily: N = sign-extended result, V = source operand.
 * The caller selects the matching CC_OP via set_cc_op().
 */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
688
689 static inline int opsize_bytes(int opsize)
690 {
691 switch (opsize) {
692 case OS_BYTE: return 1;
693 case OS_WORD: return 2;
694 case OS_LONG: return 4;
695 case OS_SINGLE: return 4;
696 case OS_DOUBLE: return 8;
697 case OS_EXTENDED: return 12;
698 case OS_PACKED: return 12;
699 default:
700 g_assert_not_reached();
701 }
702 }
703
704 static inline int insn_opsize(int insn)
705 {
706 switch ((insn >> 6) & 3) {
707 case 0: return OS_BYTE;
708 case 1: return OS_WORD;
709 case 2: return OS_LONG;
710 default:
711 g_assert_not_reached();
712 }
713 }
714
715 static inline int ext_opsize(int ext, int pos)
716 {
717 switch ((ext >> pos) & 7) {
718 case 0: return OS_LONG;
719 case 1: return OS_SINGLE;
720 case 2: return OS_EXTENDED;
721 case 3: return OS_PACKED;
722 case 4: return OS_WORD;
723 case 5: return OS_DOUBLE;
724 case 6: return OS_BYTE;
725 default:
726 g_assert_not_reached();
727 }
728 }
729
730 /*
731 * Assign value to a register. If the width is less than the register width
732 * only the low part of the register is set.
733 */
734 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
735 {
736 TCGv tmp;
737 switch (opsize) {
738 case OS_BYTE:
739 tcg_gen_andi_i32(reg, reg, 0xffffff00);
740 tmp = tcg_temp_new();
741 tcg_gen_ext8u_i32(tmp, val);
742 tcg_gen_or_i32(reg, reg, tmp);
743 tcg_temp_free(tmp);
744 break;
745 case OS_WORD:
746 tcg_gen_andi_i32(reg, reg, 0xffff0000);
747 tmp = tcg_temp_new();
748 tcg_gen_ext16u_i32(tmp, val);
749 tcg_gen_or_i32(reg, reg, tmp);
750 tcg_temp_free(tmp);
751 break;
752 case OS_LONG:
753 case OS_SINGLE:
754 tcg_gen_mov_i32(reg, val);
755 break;
756 default:
757 g_assert_not_reached();
758 }
759 }
760
/*
 * Generate code for an "effective address". Does not adjust the base
 * register for autoincrement addressing modes.  Returns NULL_QREG for
 * modes that have no memory address (register direct, immediate).
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        /* Register direct modes have no memory address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* A7 stays word-aligned on 680x0: byte push moves SP by 2. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 2: /* pc displacement */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
828
829 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
830 int opsize)
831 {
832 int mode = extract32(insn, 3, 3);
833 int reg0 = REG(insn, 0);
834 return gen_lea_mode(env, s, mode, reg0, opsize);
835 }
836
/*
 * Generate code to load/store a value from/into an EA.  WHAT selects a
 * store or a sign-/zero-extending load (see ea_what).  ADDRP is non-null
 * for read-modify-write operands: the address computed during the read
 * is saved there and reused for the matching store, which also defers
 * the auto-inc/dec register update until the store.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        if (what == EA_STORE || !addrp) {
            /* Queue the post-increment unless the caller will redo it
               on the write half of a read-modify-write. */
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                /* A7 stays word-aligned on 680x0. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrement. */
        if (addrp && what == EA_STORE) {
            /* Reuse the address computed by the earlier read. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return mark_to_release(s, tcg_const_i32(offset));
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
953
954 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
955 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
956 {
957 int mode = extract32(insn, 3, 3);
958 int reg0 = REG(insn, 0);
959 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
960 }
961
962 static TCGv_ptr gen_fp_ptr(int freg)
963 {
964 TCGv_ptr fp = tcg_temp_new_ptr();
965 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
966 return fp;
967 }
968
969 static TCGv_ptr gen_fp_result_ptr(void)
970 {
971 TCGv_ptr fp = tcg_temp_new_ptr();
972 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
973 return fp;
974 }
975
976 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
977 {
978 TCGv t32;
979 TCGv_i64 t64;
980
981 t32 = tcg_temp_new();
982 tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
983 tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
984 tcg_temp_free(t32);
985
986 t64 = tcg_temp_new_i64();
987 tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
988 tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
989 tcg_temp_free_i64(t64);
990 }
991
/*
 * Load a value of in-memory size OPSIZE from ADDR and convert it into
 * the FP register pointed to by FP (via the ext* conversion helpers).
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_qemu_ld8s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_WORD:
        tcg_gen_qemu_ld16s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld64(t64, addr, index);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU: extended precision is unimplemented. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* First long word: the 16-bit field lands in l.upper. */
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        /* Remaining 64 bits at addr+4 go to l.lower. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld64(t64, tmp, index);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1046
/*
 * Convert the FP register pointed to by FP to the in-memory size OPSIZE
 * (via the red* conversion helpers) and store it at ADDR.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU: extended precision is unimplemented. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* First long word carries l.upper in its high 16 bits. */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        /* Remaining 64 bits from l.lower go to addr+4. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1101
1102 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1103 TCGv_ptr fp, ea_what what, int index)
1104 {
1105 if (what == EA_STORE) {
1106 gen_store_fp(s, opsize, addr, fp, index);
1107 } else {
1108 gen_load_fp(s, opsize, addr, fp, index);
1109 }
1110 }
1111
/*
 * FP counterpart of gen_ea_mode: load/store the FP register *FP
 * from/to the effective address described by MODE/REG0/OPSIZE.
 * Returns 0 on success, -1 for an invalid mode/what combination.
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct. */
        /* An is never a valid FP operand. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement. */
        /* The increment is applied immediately (no delayed writeback). */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Immediates may only be read. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    /* ColdFire FPU: extended precision unimplemented. */
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1252
1253 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1254 int opsize, TCGv_ptr fp, ea_what what, int index)
1255 {
1256 int mode = extract32(insn, 3, 3);
1257 int reg0 = REG(insn, 0);
1258 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1259 }
1260
/*
 * A materialized condition test: the predicate is "v1 <tcond> v2".
 * g1/g2 mark v1/v2 as TCG globals (e.g. QREG_CC_N) rather than
 * temporaries, so free_cond() knows not to release them.
 */
typedef struct {
    TCGCond tcond;
    bool g1;
    bool g2;
    TCGv v1;
    TCGv v2;
} DisasCompare;
1268
/*
 * Materialize m68k condition code COND (0..15) into *C as a TCG
 * comparison, exploiting the current cc_op form to avoid a full flags
 * flush where possible.  Each case computes the condition for the odd
 * member of its pair; the "done" tail inverts the TCG condition for
 * even COND values, which encode the negated predicates.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly. */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* In CMP form, N holds the first operand and V the second. */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* N/MI needs the sign of the actual subtraction result,
               re-extended to the width of the original compare. */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N. */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C. */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS. */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above. */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is stored in the sign bit in flushed form. */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* -(Z == 0 ? 0 : 1) | (N ^ V): sign-bit set iff LE holds. */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1449
/*
 * Release temporaries allocated by gen_cc_cond().  Values flagged as
 * globals (g1/g2) are long-lived TCG registers and must not be freed.
 */
static void free_cond(DisasCompare *c)
{
    if (!c->g1) {
        tcg_temp_free(c->v1);
    }
    if (!c->g2) {
        tcg_temp_free(c->v2);
    }
}
1459
/* Emit a branch to L1, taken when m68k condition COND holds. */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    /* Sync cc_op before the branch so both paths see consistent state. */
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
    free_cond(&c);
}
1469
/*
 * Force a TB lookup after an instruction that changes the CPU state.
 * Stores the next PC and ends the TB with DISAS_EXIT so the new state
 * is re-evaluated before further execution.
 */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1477
/*
 * Evaluate the source effective address of the current insn into RESULT,
 * sign- or zero-extending per OP_SIGN.  ADDRP, if non-NULL, receives the
 * computed address for a later DEST_EA write-back.  On an invalid EA this
 * raises an address fault and returns from the enclosing handler.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {        \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \
        if (IS_NULL_QREG(result)) {                             \
            gen_addr_fault(s);                                  \
            return;                                             \
        }                                                       \
    } while (0)
1486
/*
 * Store VAL to the destination effective address of INSN, reusing a
 * previously computed address via ADDRP when non-NULL.  On an invalid
 * EA this raises an address fault and returns from the enclosing handler.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {             \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \
                                EA_STORE, IS_USER(s));          \
        if (IS_NULL_QREG(ea_result)) {                          \
            gen_addr_fault(s);                                  \
            return;                                             \
        }                                                       \
    } while (0)
1495
1496 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1497 {
1498 #ifndef CONFIG_USER_ONLY
1499 return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)
1500 || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1501 #else
1502 return true;
1503 #endif
1504 }
1505
/*
 * Generate a jump to immediate address DEST, chaining TBs when allowed.
 * N is the goto_tb slot index (0 or 1) and must be unique per TB exit.
 */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->base.singlestep_enabled)) {
        /* Under single-step, trap to the debugger instead of jumping. */
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Cross-page target: indirect jump, no chaining. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1521
/* Scc: set a destination byte to 0xff if the condition holds, else 0. */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* Turn the 0/1 setcond result into 0x00/0xff. */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
1539
/*
 * DBcc: if the condition holds, fall through; otherwise decrement the
 * low word of Dn and branch to base+offset unless the counter hit -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    /* Condition true: skip the decrement-and-branch. */
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low 16 bits of Dn are updated. */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);
}
1563
/* Undefined MAC opcode: raise a line-A emulator trap. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1568
/* Undefined FPU opcode: raise a line-F emulator trap. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1573
/* Catch-all for unrecognized opcodes: log and raise an illegal trap. */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1585
1586 DISAS_INSN(mulw)
1587 {
1588 TCGv reg;
1589 TCGv tmp;
1590 TCGv src;
1591 int sign;
1592
1593 sign = (insn & 0x100) != 0;
1594 reg = DREG(insn, 9);
1595 tmp = tcg_temp_new();
1596 if (sign)
1597 tcg_gen_ext16s_i32(tmp, reg);
1598 else
1599 tcg_gen_ext16u_i32(tmp, reg);
1600 SRC_EA(env, src, OS_WORD, sign, NULL);
1601 tcg_gen_mul_i32(tmp, tmp, src);
1602 tcg_gen_mov_i32(reg, tmp);
1603 gen_logic_cc(s, tmp, OS_LONG);
1604 tcg_temp_free(tmp);
1605 }
1606
/* DIVS.W/DIVU.W: 32/16 divide, result packed as 16-bit rem:quot in Dn. */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    /* The helper takes the destination register number, not its value,
       since it must also raise a divide-by-zero exception. */
    destr = tcg_const_i32(REG(insn, 9));
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src);
    } else {
        gen_helper_divuw(cpu_env, destr, src);
    }
    tcg_temp_free(destr);

    set_cc_op(s, CC_OP_FLAGS);
}
1630
/*
 * DIVS.L/DIVU.L and 64/32 quad forms.  The extension word selects
 * signedness (bit 11), 64-bit numerator (bit 10), and the register pair.
 * Division itself is done in helpers so divide-by-zero can trap.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        /* 64-bit numerator requires the QUAD_MULDIV feature (68020+). */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_const_i32(REG(ext, 12));
        reg = tcg_const_i32(REG(ext, 0));
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den);
        } else {
            gen_helper_divull(cpu_env, num, reg, den);
        }
        tcg_temp_free(reg);
        tcg_temp_free(num);
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_const_i32(REG(ext, 12));
    reg = tcg_const_i32(REG(ext, 0));
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den);
    } else {
        gen_helper_divul(cpu_env, num, reg, den);
    }
    tcg_temp_free(reg);
    tcg_temp_free(num);

    set_cc_op(s, CC_OP_FLAGS);
}
1679
/*
 * Branch-free two-digit BCD add: dest = dest + src + X (in BCD).
 * Adds 6 to each digit up front, then subtracts the excess 6 from
 * digits that did not produce a decimal carry.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_const_i32(0x066);
    tcg_gen_add_i32(t0, t0, src);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* 0x22 * 3 = 0x66: reconstruct the per-digit correction. */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);
    tcg_temp_free(t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
    tcg_temp_free(t1);
}
1746
/*
 * Branch-free two-digit BCD subtract: dest = dest - src - X (in BCD),
 * implemented as a BCD add of the ten's complement of src.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     * dest10 = dest10 - src10 - X
     *        = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);
    tcg_temp_free(t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
1801
/*
 * Set flags from a BCD result: C and X come from bit 8 (the decimal
 * carry); Z is sticky -- it is only cleared (left nonzero) when the
 * low byte of the result is nonzero, as the caller pre-flushed flags.
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1811
/* ABCD Dx,Dy: BCD add of the low bytes of two data registers. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    /* Write back only the low byte of the destination register. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1826
/* ABCD -(Ax),-(Ay): BCD add with predecrement memory operands. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    /* Keep the destination address for the store below. */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1847
/* SBCD Dx,Dy: BCD subtract of the low bytes of two data registers. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    /* Write back only the low byte of the destination register. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1863
/* SBCD -(Ax),-(Ay): BCD subtract with predecrement memory operands. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    /* Keep the destination address for the store below. */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1884
/* NBCD <EA>: BCD negate, computed as 0 - operand - X. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_const_i32(0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);

    tcg_temp_free(dest);
}
1903
/*
 * ADD/SUB in both directions: Dn op <EA> -> Dn, or <EA> op Dn -> <EA>
 * (bit 8 of the opcode selects the memory-destination form).
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <EA> is the destination: load it, keep the address. */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out: unsigned result < addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* Borrow out: minuend < subtrahend (computed before the sub). */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}
1942
/* ColdFire BITREV: reverse the order of the 32 bits in REG. */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1950
/*
 * BTST/BCHG/BCLR/BSET with the bit number in a data register.
 * Memory operands act on a byte (bit number mod 8); register
 * operands act on a long (bit number mod 32).  Z gets the tested bit.
 */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    src2 = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
    else
        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);

    /* Build the single-bit mask 1 << (bitnum mod width). */
    tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, src2);
    tcg_temp_free(src2);

    /* Z reflects the state of the tested bit before modification. */
    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);

    dest = tcg_temp_new();
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(tmp);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
2001
/* ColdFire SATS: saturate Dn to 32-bit signed range if V is set. */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
2010
/* Push a 32-bit value onto the stack: SP -= 4; *SP = val. */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    /* Store before committing SP, so a fault leaves SP unchanged. */
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    tcg_gen_mov_i32(QREG_SP, tmp);
    tcg_temp_free(tmp);
}
2021
2022 static TCGv mreg(int reg)
2023 {
2024 if (reg < 8) {
2025 /* Dx */
2026 return cpu_dregs[reg];
2027 }
2028 /* Ax */
2029 return cpu_aregs[reg & 7];
2030 }
2031
/*
 * MOVEM: move multiple registers to/from memory per a 16-bit mask.
 * Bit 10 of the opcode selects load vs store; bit 6 selects word/long.
 * Postincrement is legal only for loads, predecrement only for stores.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* Load everything first, then commit, so a fault mid-sequence
           leaves the register file untouched. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            /* The mask is bit-reversed in this mode: iterate A7..D0. */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}
2142
/*
 * MOVEP: transfer 2 or 4 bytes between a data register and alternate
 * memory bytes at (An)+displacement, most-significant byte first,
 * stepping the address by 2 between bytes.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    /* Bit 6: long (4 bytes) vs word (2 bytes). */
    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    /* Bit 7: register-to-memory vs memory-to-register. */
    if (insn & 0x80) {
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
    tcg_temp_free(abuf);
    tcg_temp_free(dbuf);
}
2187
/*
 * BTST/BCHG/BCLR/BSET with an immediate bit number (extension word).
 * Memory operands act on a byte (bit mod 8); register operands on a
 * long (bit mod 32).  Z gets the tested bit before modification.
 */
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = read_im16(env, s);
    /* Reserved high bits of the extension word make the insn illegal;
       the permitted range differs between 68000 and ColdFire. */
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        if (bitnum & 0xfe00) {
            disas_undef(env, s, insn);
            return;
        }
    } else {
        if (bitnum & 0xff00) {
            disas_undef(env, s, insn);
            return;
        }
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);

    if (op) {
        tmp = tcg_temp_new();
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
        tcg_temp_free(tmp);
    }
}
2247
/* Compute the CCR value into a fresh temporary; caller frees it. */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    /* The helper reads env->cc_op, so it must be in sync first. */
    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
2257
/* Compute the full SR (supervisor bits | CCR) into a fresh temporary. */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    /* Upper SR bits come from QREG_SR; low 5 bits from the CCR. */
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    tcg_temp_free(ccr);
    return sr;
}
2270
/*
 * Set SR or (if CCR_ONLY) just the condition codes from an immediate.
 * The CCR-only case writes the flag registers directly; note Z is
 * stored inverted (0 means Z set) in the flushed representation.
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        TCGv sr = tcg_const_i32(val);
        gen_helper_set_sr(cpu_env, sr);
        tcg_temp_free(sr);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2286
/* Set SR or (if CCR_ONLY) just the condition codes from a TCG value. */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2296
/*
 * MOVE to SR/CCR: the source is either an immediate (EA field 0x3c)
 * or a word-sized effective address.
 */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
2310
/*
 * Immediate arithmetic/logic group: ORI/ANDI/SUBI/ADDI/EORI/CMPI.
 * The EA field 0x3c selects the SR/CCR destination form, valid only
 * for the logical ops (ORI/ANDI/EORI to CCR or, privileged, to SR).
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            /* Byte size targets the CCR. */
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            /* Word size targets the SR: supervisor-only. */
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi (op 6) never writes back, so no address is kept. */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* X = borrow out, computed before the subtraction. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* X = carry out: unsigned result < addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    tcg_temp_free(dest);
}
2414
/*
 * CAS: single compare-and-swap on a memory operand, implemented with
 * a host atomic cmpxchg.  Flags are set as for CMP of (old - Dc).
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    /* gen_lea did not adjust An for modes 3/4; do it here. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2479
/*
 * CAS2.W: double word-sized compare-and-swap on two addresses.
 * In parallel (MTTCG) mode there is no word-pair atomic helper here,
 * so execution falls back to the serial exclusive path via
 * gen_helper_exit_atomic.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper. */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2531
/*
 * CAS2.L: double long-sized compare-and-swap on two addresses.
 * Unlike cas2w there is a parallel-capable helper for the long form.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper. */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2582
2583 DISAS_INSN(byterev)
2584 {
2585 TCGv reg;
2586
2587 reg = DREG(insn, 0);
2588 tcg_gen_bswap32_i32(reg, reg);
2589 }
2590
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    /* The operand size is encoded in the top nibble of the opcode. */
    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    /* Bits 6-8 hold the destination EA mode. */
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended. */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* Rebuild a standard EA field (reg | mode<<3) from the swapped
         * destination encoding used by the move opcodes. */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend. */
        gen_logic_cc(s, src, opsize);
    }
}
2627
/* negx <ea>: negate with extend; result = 0 - (src + X). */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = -(src + X);
     */

    z = tcg_const_i32(0);
    /* src + X as a double-width add so the carry lands in QREG_CC_X. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Keep only the borrow bit. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation. The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2671
2672 DISAS_INSN(lea)
2673 {
2674 TCGv reg;
2675 TCGv tmp;
2676
2677 reg = AREG(insn, 9);
2678 tmp = gen_lea(env, s, insn, OS_LONG);
2679 if (IS_NULL_QREG(tmp)) {
2680 gen_addr_fault(s);
2681 return;
2682 }
2683 tcg_gen_mov_i32(reg, tmp);
2684 }
2685
2686 DISAS_INSN(clr)
2687 {
2688 int opsize;
2689 TCGv zero;
2690
2691 zero = tcg_const_i32(0);
2692
2693 opsize = insn_opsize(insn);
2694 DEST_EA(env, insn, opsize, zero, NULL);
2695 gen_logic_cc(s, zero, opsize);
2696 tcg_temp_free(zero);
2697 }
2698
2699 DISAS_INSN(move_from_ccr)
2700 {
2701 TCGv ccr;
2702
2703 ccr = gen_get_ccr(s);
2704 DEST_EA(env, insn, OS_WORD, ccr, NULL);
2705 }
2706
2707 DISAS_INSN(neg)
2708 {
2709 TCGv src1;
2710 TCGv dest;
2711 TCGv addr;
2712 int opsize;
2713
2714 opsize = insn_opsize(insn);
2715 SRC_EA(env, src1, opsize, 1, &addr);
2716 dest = tcg_temp_new();
2717 tcg_gen_neg_i32(dest, src1);
2718 set_cc_op(s, CC_OP_SUBB + opsize);
2719 gen_update_cc_add(dest, src1, opsize);
2720 tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2721 DEST_EA(env, insn, opsize, dest, &addr);
2722 tcg_temp_free(dest);
2723 }
2724
DISAS_INSN(move_to_ccr)
{
    /* Shares the move-to-SR path; the final argument presumably restricts
     * the write to the CCR byte — see gen_move_to_sr. */
    gen_move_to_sr(env, s, insn, true);
}
2729
2730 DISAS_INSN(not)
2731 {
2732 TCGv src1;
2733 TCGv dest;
2734 TCGv addr;
2735 int opsize;
2736
2737 opsize = insn_opsize(insn);
2738 SRC_EA(env, src1, opsize, 1, &addr);
2739 dest = tcg_temp_new();
2740 tcg_gen_not_i32(dest, src1);
2741 DEST_EA(env, insn, opsize, dest, &addr);
2742 gen_logic_cc(s, dest, opsize);
2743 }
2744
2745 DISAS_INSN(swap)
2746 {
2747 TCGv src1;
2748 TCGv src2;
2749 TCGv reg;
2750
2751 src1 = tcg_temp_new();
2752 src2 = tcg_temp_new();
2753 reg = DREG(insn, 0);
2754 tcg_gen_shli_i32(src1, reg, 16);
2755 tcg_gen_shri_i32(src2, reg, 16);
2756 tcg_gen_or_i32(reg, src1, src2);
2757 tcg_temp_free(src2);
2758 tcg_temp_free(src1);
2759 gen_logic_cc(s, reg, OS_LONG);
2760 }
2761
DISAS_INSN(bkpt)
{
    /* bkpt: raise EXCP_DEBUG at the current instruction address. */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
}
2766
2767 DISAS_INSN(pea)
2768 {
2769 TCGv tmp;
2770
2771 tmp = gen_lea(env, s, insn, OS_LONG);
2772 if (IS_NULL_QREG(tmp)) {
2773 gen_addr_fault(s);
2774 return;
2775 }
2776 gen_push(s, tmp);
2777 }
2778
2779 DISAS_INSN(ext)
2780 {
2781 int op;
2782 TCGv reg;
2783 TCGv tmp;
2784
2785 reg = DREG(insn, 0);
2786 op = (insn >> 6) & 7;
2787 tmp = tcg_temp_new();
2788 if (op == 3)
2789 tcg_gen_ext16s_i32(tmp, reg);
2790 else
2791 tcg_gen_ext8s_i32(tmp, reg);
2792 if (op == 2)
2793 gen_partset_reg(OS_WORD, reg, tmp);
2794 else
2795 tcg_gen_mov_i32(reg, tmp);
2796 gen_logic_cc(s, tmp, OS_LONG);
2797 tcg_temp_free(tmp);
2798 }
2799
2800 DISAS_INSN(tst)
2801 {
2802 int opsize;
2803 TCGv tmp;
2804
2805 opsize = insn_opsize(insn);
2806 SRC_EA(env, tmp, opsize, 1, NULL);
2807 gen_logic_cc(s, tmp, opsize);
2808 }
2809
DISAS_INSN(pulse)
{
    /* Implemented as a NOP; no architectural effect is modelled here. */
}
2814
DISAS_INSN(illegal)
{
    /* Raise the illegal-instruction exception at the current insn. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2819
/* ??? This should be atomic. */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    /* tas <ea>: test the byte, then write it back with bit 7 set.
     * Implemented as separate load and store, hence the atomicity caveat. */
    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
2834
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    /* Extension word bit 11 selects a signed multiply. */
    sign = ext & 0x800;

    /* Extension word bit 10 selects the 64-bit (Dh:Dl) result form. */
    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        /* Low half of the product in QREG_CC_Z, high half in QREG_CC_N. */
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z must reflect the whole 64-bit result being zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        /* 32-bit result with a real overflow (V) computation. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2897
/*
 * Common code for link/linkl: push the old An, point An at the saved
 * slot, then add the displacement to SP to allocate the frame.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    /* When An is A7 (== SP), skip the move so the final SP update below
     * is not clobbered by an aliasing write. */
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    tcg_temp_free(tmp);
}
2913
2914 DISAS_INSN(link)
2915 {
2916 int16_t offset;
2917
2918 offset = read_im16(env, s);
2919 gen_link(s, insn, offset);
2920 }
2921
2922 DISAS_INSN(linkl)
2923 {
2924 int32_t offset;
2925
2926 offset = read_im32(env, s);
2927 gen_link(s, insn, offset);
2928 }
2929
2930 DISAS_INSN(unlk)
2931 {
2932 TCGv src;
2933 TCGv reg;
2934 TCGv tmp;
2935
2936 src = tcg_temp_new();
2937 reg = AREG(insn, 0);
2938 tcg_gen_mov_i32(src, reg);
2939 tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2940 tcg_gen_mov_i32(reg, tmp);
2941 tcg_gen_addi_i32(QREG_SP, src, 4);
2942 tcg_temp_free(src);
2943 tcg_temp_free(tmp);
2944 }
2945
2946 #if defined(CONFIG_SOFTMMU)
DISAS_INSN(reset)
{
    /* reset is privileged: fault in user mode. */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
2956 #endif
2957
DISAS_INSN(nop)
{
    /* No operation. */
}
2961
2962 DISAS_INSN(rtd)
2963 {
2964 TCGv tmp;
2965 int16_t offset = read_im16(env, s);
2966
2967 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2968 tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2969 gen_jmp(s, tmp);
2970 }
2971
2972 DISAS_INSN(rts)
2973 {
2974 TCGv tmp;
2975
2976 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2977 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2978 gen_jmp(s, tmp);
2979 }
2980
DISAS_INSN(jump)
{
    TCGv tmp;

    /*
     * Load the target address first to ensure correct exception
     * behavior.
     */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    /* Bit 6 clear selects jsr; set selects jmp. */
    if ((insn & 0x40) == 0) {
        /* jsr: push the return address — s->pc here is already past any
         * EA extension words consumed by gen_lea above. */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
3000
3001 DISAS_INSN(addsubq)
3002 {
3003 TCGv src;
3004 TCGv dest;
3005 TCGv val;
3006 int imm;
3007 TCGv addr;
3008 int opsize;
3009
3010 if ((insn & 070) == 010) {
3011 /* Operation on address register is always long. */
3012 opsize = OS_LONG;
3013 } else {
3014 opsize = insn_opsize(insn);
3015 }
3016 SRC_EA(env, src, opsize, 1, &addr);
3017 imm = (insn >> 9) & 7;
3018 if (imm == 0) {
3019 imm = 8;
3020 }
3021 val = tcg_const_i32(imm);
3022 dest = tcg_temp_new();
3023 tcg_gen_mov_i32(dest, src);
3024 if ((insn & 0x38) == 0x08) {
3025 /*
3026 * Don't update condition codes if the destination is an
3027 * address register.
3028 */
3029 if (insn & 0x0100) {
3030 tcg_gen_sub_i32(dest, dest, val);
3031 } else {
3032 tcg_gen_add_i32(dest, dest, val);
3033 }
3034 } else {
3035 if (insn & 0x0100) {
3036 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
3037 tcg_gen_sub_i32(dest, dest, val);
3038 set_cc_op(s, CC_OP_SUBB + opsize);
3039 } else {
3040 tcg_gen_add_i32(dest, dest, val);
3041 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
3042 set_cc_op(s, CC_OP_ADDB + opsize);
3043 }
3044 gen_update_cc_add(dest, val, opsize);
3045 }
3046 tcg_temp_free(val);
3047 DEST_EA(env, insn, opsize, dest, &addr);
3048 tcg_temp_free(dest);
3049 }
3050
3051 DISAS_INSN(tpf)
3052 {
3053 switch (insn & 7) {
3054 case 2: /* One extension word. */
3055 s->pc += 2;
3056 break;
3057 case 3: /* Two extension words. */
3058 s->pc += 4;
3059 break;
3060 case 4: /* No extension words. */
3061 break;
3062 default:
3063 disas_undef(env, s, insn);
3064 }
3065 }
3066
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    /* Displacements are relative to s->pc before any extension words. */
    base = s->pc;
    op = (insn >> 8) & 0xf;
    /* An 8-bit displacement of 0 means a 16-bit one follows; a value of
     * -1 (0xff) means a 32-bit one follows. */
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        TCGLabel *l1 = gen_new_label();
        /* Skip the taken path when the inverted condition holds. */
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch. */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset);
    }
}
3098
3099 DISAS_INSN(moveq)
3100 {
3101 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3102 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3103 }
3104
3105 DISAS_INSN(mvzs)
3106 {
3107 int opsize;
3108 TCGv src;
3109 TCGv reg;
3110
3111 if (insn & 0x40)
3112 opsize = OS_WORD;
3113 else
3114 opsize = OS_BYTE;
3115 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3116 reg = DREG(insn, 9);
3117 tcg_gen_mov_i32(reg, src);
3118 gen_logic_cc(s, src, opsize);
3119 }
3120
3121 DISAS_INSN(or)
3122 {
3123 TCGv reg;
3124 TCGv dest;
3125 TCGv src;
3126 TCGv addr;
3127 int opsize;
3128
3129 opsize = insn_opsize(insn);
3130 reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3131 dest = tcg_temp_new();
3132 if (insn & 0x100) {
3133 SRC_EA(env, src, opsize, 0, &addr);
3134 tcg_gen_or_i32(dest, src, reg);
3135 DEST_EA(env, insn, opsize, dest, &addr);
3136 } else {
3137 SRC_EA(env, src, opsize, 0, NULL);
3138 tcg_gen_or_i32(dest, src, reg);
3139 gen_partset_reg(opsize, DREG(insn, 9), dest);
3140 }
3141 gen_logic_cc(s, dest, opsize);
3142 tcg_temp_free(dest);
3143 }
3144
3145 DISAS_INSN(suba)
3146 {
3147 TCGv src;
3148 TCGv reg;
3149
3150 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3151 reg = AREG(insn, 9);
3152 tcg_gen_sub_i32(reg, reg, src);
3153 }
3154
/* Common flag/result generation for subx: result left in QREG_CC_N. */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    tmp = tcg_const_i32(0);
    /* src + X as a double-width add so the carry lands in QREG_CC_X. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Keep only the borrow bit. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract: (res ^ dest) & (dest ^ src). */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3187
3188 DISAS_INSN(subx_reg)
3189 {
3190 TCGv dest;
3191 TCGv src;
3192 int opsize;
3193
3194 opsize = insn_opsize(insn);
3195
3196 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3197 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3198
3199 gen_subx(s, src, dest, opsize);
3200
3201 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3202 }
3203
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* subx -(Ay),-(Ax): predecrement both registers, then load. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    /* Flags and the result (in QREG_CC_N) are produced by gen_subx. */
    gen_subx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
3229
3230 DISAS_INSN(mov3q)
3231 {
3232 TCGv src;
3233 int val;
3234
3235 val = (insn >> 9) & 7;
3236 if (val == 0)
3237 val = -1;
3238 src = tcg_const_i32(val);
3239 gen_logic_cc(s, src, OS_LONG);
3240 DEST_EA(env, insn, OS_LONG, src, NULL);
3241 tcg_temp_free(src);
3242 }
3243
3244 DISAS_INSN(cmp)
3245 {
3246 TCGv src;
3247 TCGv reg;
3248 int opsize;
3249
3250 opsize = insn_opsize(insn);
3251 SRC_EA(env, src, opsize, 1, NULL);
3252 reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3253 gen_update_cc_cmp(s, reg, src, opsize);
3254 }
3255
3256 DISAS_INSN(cmpa)
3257 {
3258 int opsize;
3259 TCGv src;
3260 TCGv reg;
3261
3262 if (insn & 0x100) {
3263 opsize = OS_LONG;
3264 } else {
3265 opsize = OS_WORD;
3266 }
3267 SRC_EA(env, src, opsize, 1, NULL);
3268 reg = AREG(insn, 9);
3269 gen_update_cc_cmp(s, reg, src, OS_LONG);
3270 }
3271
3272 DISAS_INSN(cmpm)
3273 {
3274 int opsize = insn_opsize(insn);
3275 TCGv src, dst;
3276
3277 /* Post-increment load (mode 3) from Ay. */
3278 src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3279 NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3280 /* Post-increment load (mode 3) from Ax. */
3281 dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3282 NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3283
3284 gen_update_cc_cmp(s, dst, src, opsize);
3285 }
3286
3287 DISAS_INSN(eor)
3288 {
3289 TCGv src;
3290 TCGv dest;
3291 TCGv addr;
3292 int opsize;
3293
3294 opsize = insn_opsize(insn);
3295
3296 SRC_EA(env, src, opsize, 0, &addr);
3297 dest = tcg_temp_new();
3298 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3299 gen_logic_cc(s, dest, opsize);
3300 DEST_EA(env, insn, opsize, dest, &addr);
3301 tcg_temp_free(dest);
3302 }