2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "tricore-opcodes.h"
42 static TCGv cpu_gpr_a
[16];
43 static TCGv cpu_gpr_d
[16];
45 static TCGv cpu_PSW_C
;
46 static TCGv cpu_PSW_V
;
47 static TCGv cpu_PSW_SV
;
48 static TCGv cpu_PSW_AV
;
49 static TCGv cpu_PSW_SAV
;
51 static TCGv_env cpu_env
;
53 #include "exec/gen-icount.h"
55 static const char *regnames_a
[] = {
56 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
57 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
58 "a12" , "a13" , "a14" , "a15",
61 static const char *regnames_d
[] = {
62 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
63 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
64 "d12" , "d13" , "d14" , "d15",
67 typedef struct DisasContext
{
68 struct TranslationBlock
*tb
;
69 target_ulong pc
, saved_pc
, next_pc
;
71 int singlestep_enabled
;
72 /* Routine used to access memory */
74 uint32_t hflags
, saved_hflags
;
93 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
94 fprintf_function cpu_fprintf
, int flags
)
96 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
97 CPUTriCoreState
*env
= &cpu
->env
;
103 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
104 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
105 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
106 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
107 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
108 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
110 for (i
= 0; i
< 16; ++i
) {
112 cpu_fprintf(f
, "\nGPR A%02d:", i
);
114 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
116 for (i
= 0; i
< 16; ++i
) {
118 cpu_fprintf(f
, "\nGPR D%02d:", i
);
120 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
122 cpu_fprintf(f
, "\n");
126 * Functions to generate micro-ops
129 /* Makros for generating helpers */
/* Call helper `name` with cpu_env plus a single constant i32 argument.
   The temporary holding the constant is freed before the macro exits.
   Terminated with do/while (0) so it behaves as one statement. */
#define gen_helper_1arg(name, arg) do {                       \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                 \
    gen_helper_##name(cpu_env, helper_tmp);                   \
    tcg_temp_free_i32(helper_tmp);                            \
    } while (0)
137 #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
138 TCGv arg00 = tcg_temp_new(); \
139 TCGv arg01 = tcg_temp_new(); \
140 TCGv arg11 = tcg_temp_new(); \
141 tcg_gen_sari_tl(arg00, arg0, 16); \
142 tcg_gen_ext16s_tl(arg01, arg0); \
143 tcg_gen_ext16s_tl(arg11, arg1); \
144 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
145 tcg_temp_free(arg00); \
146 tcg_temp_free(arg01); \
147 tcg_temp_free(arg11); \
150 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
151 TCGv arg00 = tcg_temp_new(); \
152 TCGv arg01 = tcg_temp_new(); \
153 TCGv arg10 = tcg_temp_new(); \
154 TCGv arg11 = tcg_temp_new(); \
155 tcg_gen_sari_tl(arg00, arg0, 16); \
156 tcg_gen_ext16s_tl(arg01, arg0); \
157 tcg_gen_sari_tl(arg11, arg1, 16); \
158 tcg_gen_ext16s_tl(arg10, arg1); \
159 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
160 tcg_temp_free(arg00); \
161 tcg_temp_free(arg01); \
162 tcg_temp_free(arg10); \
163 tcg_temp_free(arg11); \
166 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
167 TCGv arg00 = tcg_temp_new(); \
168 TCGv arg01 = tcg_temp_new(); \
169 TCGv arg10 = tcg_temp_new(); \
170 TCGv arg11 = tcg_temp_new(); \
171 tcg_gen_sari_tl(arg00, arg0, 16); \
172 tcg_gen_ext16s_tl(arg01, arg0); \
173 tcg_gen_sari_tl(arg10, arg1, 16); \
174 tcg_gen_ext16s_tl(arg11, arg1); \
175 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
176 tcg_temp_free(arg00); \
177 tcg_temp_free(arg01); \
178 tcg_temp_free(arg10); \
179 tcg_temp_free(arg11); \
182 #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
183 TCGv arg00 = tcg_temp_new(); \
184 TCGv arg01 = tcg_temp_new(); \
185 TCGv arg11 = tcg_temp_new(); \
186 tcg_gen_sari_tl(arg01, arg0, 16); \
187 tcg_gen_ext16s_tl(arg00, arg0); \
188 tcg_gen_sari_tl(arg11, arg1, 16); \
189 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
190 tcg_temp_free(arg00); \
191 tcg_temp_free(arg01); \
192 tcg_temp_free(arg11); \
195 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
196 TCGv_i64 ret = tcg_temp_new_i64(); \
197 TCGv_i64 arg1 = tcg_temp_new_i64(); \
199 tcg_gen_concat_i32_i64(arg1, al1, ah1); \
200 gen_helper_##name(ret, arg1, arg2); \
201 tcg_gen_extr_i64_i32(rl, rh, ret); \
203 tcg_temp_free_i64(ret); \
204 tcg_temp_free_i64(arg1); \
207 #define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
208 TCGv_i64 ret = tcg_temp_new_i64(); \
210 gen_helper_##name(ret, cpu_env, arg1, arg2); \
211 tcg_gen_extr_i64_i32(rl, rh, ret); \
213 tcg_temp_free_i64(ret); \
216 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
217 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
218 ((offset & 0x0fffff) << 1))
220 /* For two 32-bit registers used a 64-bit register, the first
221 registernumber needs to be even. Otherwise we trap. */
222 static inline void generate_trap(DisasContext
*ctx
, int class, int tin
);
/* A 64-bit value lives in an even/odd GPR pair; the first register
   number must be even, otherwise raise an opcode trap (see the comment
   above the generate_trap() forward declaration). */
#define CHECK_REG_PAIR(reg) do {                          \
    if (reg & 0x1) {                                      \
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD);     \
    }                                                     \
    } while (0)
229 /* Functions for load/save to/from memory */
231 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
232 int16_t con
, TCGMemOp mop
)
234 TCGv temp
= tcg_temp_new();
235 tcg_gen_addi_tl(temp
, r2
, con
);
236 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
240 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
241 int16_t con
, TCGMemOp mop
)
243 TCGv temp
= tcg_temp_new();
244 tcg_gen_addi_tl(temp
, r2
, con
);
245 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
249 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
251 TCGv_i64 temp
= tcg_temp_new_i64();
253 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
254 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
256 tcg_temp_free_i64(temp
);
259 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
262 TCGv temp
= tcg_temp_new();
263 tcg_gen_addi_tl(temp
, base
, con
);
264 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
268 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
270 TCGv_i64 temp
= tcg_temp_new_i64();
272 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
273 /* write back to two 32 bit regs */
274 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
276 tcg_temp_free_i64(temp
);
279 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
282 TCGv temp
= tcg_temp_new();
283 tcg_gen_addi_tl(temp
, base
, con
);
284 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
288 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
291 TCGv temp
= tcg_temp_new();
292 tcg_gen_addi_tl(temp
, r2
, off
);
293 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
294 tcg_gen_mov_tl(r2
, temp
);
298 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
301 TCGv temp
= tcg_temp_new();
302 tcg_gen_addi_tl(temp
, r2
, off
);
303 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
304 tcg_gen_mov_tl(r2
, temp
);
308 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
309 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
311 TCGv temp
= tcg_temp_new();
312 TCGv temp2
= tcg_temp_new();
314 CHECK_REG_PAIR(ereg
);
315 /* temp = (M(EA, word) */
316 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
317 /* temp = temp & ~E[a][63:32]) */
318 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
319 /* temp2 = (E[a][31:0] & E[a][63:32]); */
320 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
321 /* temp = temp | temp2; */
322 tcg_gen_or_tl(temp
, temp
, temp2
);
323 /* M(EA, word) = temp; */
324 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
327 tcg_temp_free(temp2
);
330 /* tmp = M(EA, word);
333 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
335 TCGv temp
= tcg_temp_new();
337 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
338 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
339 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
344 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
346 TCGv temp
= tcg_temp_new();
347 TCGv temp2
= tcg_temp_new();
348 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
349 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
350 cpu_gpr_d
[reg
], temp
);
351 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
352 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
355 tcg_temp_free(temp2
);
358 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
360 TCGv temp
= tcg_temp_new();
361 TCGv temp2
= tcg_temp_new();
362 TCGv temp3
= tcg_temp_new();
364 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
365 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
366 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
367 tcg_gen_or_tl(temp2
, temp2
, temp3
);
368 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
369 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
372 tcg_temp_free(temp2
);
373 tcg_temp_free(temp3
);
377 /* We generate loads and store to core special function register (csfr) through
378 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
379 makros R, A and E, which allow read-only, all and endinit protected access.
380 These makros also specify in which ISA version the csfr was introduced. */
381 #define R(ADDRESS, REG, FEATURE) \
383 if (tricore_feature(env, FEATURE)) { \
384 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
387 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
388 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
389 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
391 /* since we're caching PSW make this a special case */
392 if (offset
== 0xfe04) {
393 gen_helper_psw_read(ret
, cpu_env
);
404 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
405 since no execption occurs */
406 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
408 if (tricore_feature(env, FEATURE)) { \
409 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
412 /* Endinit protected registers
413 TODO: Since the endinit bit is in a register of a not yet implemented
414 watchdog device, we handle endinit protected registers like
415 all-access registers for now. */
416 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
417 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
420 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
421 /* since we're caching PSW make this a special case */
422 if (offset
== 0xfe04) {
423 gen_helper_psw_write(cpu_env
, r1
);
430 /* generate privilege trap */
434 /* Functions for arithmetic instructions */
436 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
438 TCGv t0
= tcg_temp_new_i32();
439 TCGv result
= tcg_temp_new_i32();
440 /* Addition and set V/SV bits */
441 tcg_gen_add_tl(result
, r1
, r2
);
443 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
444 tcg_gen_xor_tl(t0
, r1
, r2
);
445 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
447 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
448 /* Calc AV/SAV bits */
449 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
450 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
452 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
453 /* write back result */
454 tcg_gen_mov_tl(ret
, result
);
456 tcg_temp_free(result
);
461 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
463 TCGv temp
= tcg_temp_new();
464 TCGv_i64 t0
= tcg_temp_new_i64();
465 TCGv_i64 t1
= tcg_temp_new_i64();
466 TCGv_i64 result
= tcg_temp_new_i64();
468 tcg_gen_add_i64(result
, r1
, r2
);
470 tcg_gen_xor_i64(t1
, result
, r1
);
471 tcg_gen_xor_i64(t0
, r1
, r2
);
472 tcg_gen_andc_i64(t1
, t1
, t0
);
473 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
475 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
476 /* calc AV/SAV bits */
477 tcg_gen_extrh_i64_i32(temp
, result
);
478 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
479 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
481 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
482 /* write back result */
483 tcg_gen_mov_i64(ret
, result
);
486 tcg_temp_free_i64(result
);
487 tcg_temp_free_i64(t0
);
488 tcg_temp_free_i64(t1
);
492 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
493 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
494 void(*op2
)(TCGv
, TCGv
, TCGv
))
496 TCGv temp
= tcg_temp_new();
497 TCGv temp2
= tcg_temp_new();
498 TCGv temp3
= tcg_temp_new();
499 TCGv temp4
= tcg_temp_new();
501 (*op1
)(temp
, r1_low
, r2
);
503 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
504 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
505 if (op1
== tcg_gen_add_tl
) {
506 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
508 tcg_gen_and_tl(temp2
, temp2
, temp3
);
511 (*op2
)(temp3
, r1_high
, r3
);
513 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
514 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
515 if (op2
== tcg_gen_add_tl
) {
516 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
518 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
520 /* combine V0/V1 bits */
521 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
523 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
525 tcg_gen_mov_tl(ret_low
, temp
);
526 tcg_gen_mov_tl(ret_high
, temp3
);
528 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
529 tcg_gen_xor_tl(temp
, temp
, ret_low
);
530 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
531 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
532 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
534 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
537 tcg_temp_free(temp2
);
538 tcg_temp_free(temp3
);
539 tcg_temp_free(temp4
);
542 /* ret = r2 + (r1 * r3); */
543 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
545 TCGv_i64 t1
= tcg_temp_new_i64();
546 TCGv_i64 t2
= tcg_temp_new_i64();
547 TCGv_i64 t3
= tcg_temp_new_i64();
549 tcg_gen_ext_i32_i64(t1
, r1
);
550 tcg_gen_ext_i32_i64(t2
, r2
);
551 tcg_gen_ext_i32_i64(t3
, r3
);
553 tcg_gen_mul_i64(t1
, t1
, t3
);
554 tcg_gen_add_i64(t1
, t2
, t1
);
556 tcg_gen_extrl_i64_i32(ret
, t1
);
559 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
560 /* t1 < -0x80000000 */
561 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
562 tcg_gen_or_i64(t2
, t2
, t3
);
563 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
564 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
566 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
567 /* Calc AV/SAV bits */
568 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
569 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
571 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
573 tcg_temp_free_i64(t1
);
574 tcg_temp_free_i64(t2
);
575 tcg_temp_free_i64(t3
);
578 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
580 TCGv temp
= tcg_const_i32(con
);
581 gen_madd32_d(ret
, r1
, r2
, temp
);
586 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
589 TCGv t1
= tcg_temp_new();
590 TCGv t2
= tcg_temp_new();
591 TCGv t3
= tcg_temp_new();
592 TCGv t4
= tcg_temp_new();
594 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
595 /* only the add can overflow */
596 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
598 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
599 tcg_gen_xor_tl(t1
, r2_high
, t2
);
600 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
602 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
603 /* Calc AV/SAV bits */
604 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
605 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
607 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
608 /* write back the result */
609 tcg_gen_mov_tl(ret_low
, t3
);
610 tcg_gen_mov_tl(ret_high
, t4
);
619 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
622 TCGv_i64 t1
= tcg_temp_new_i64();
623 TCGv_i64 t2
= tcg_temp_new_i64();
624 TCGv_i64 t3
= tcg_temp_new_i64();
626 tcg_gen_extu_i32_i64(t1
, r1
);
627 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
628 tcg_gen_extu_i32_i64(t3
, r3
);
630 tcg_gen_mul_i64(t1
, t1
, t3
);
631 tcg_gen_add_i64(t2
, t2
, t1
);
632 /* write back result */
633 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
634 /* only the add overflows, if t2 < t1
636 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
637 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
638 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
640 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
641 /* Calc AV/SAV bits */
642 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
643 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
645 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
647 tcg_temp_free_i64(t1
);
648 tcg_temp_free_i64(t2
);
649 tcg_temp_free_i64(t3
);
653 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
656 TCGv temp
= tcg_const_i32(con
);
657 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
662 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
665 TCGv temp
= tcg_const_i32(con
);
666 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
671 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
672 TCGv r3
, uint32_t n
, uint32_t mode
)
674 TCGv temp
= tcg_const_i32(n
);
675 TCGv temp2
= tcg_temp_new();
676 TCGv_i64 temp64
= tcg_temp_new_i64();
679 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
682 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
685 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
688 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
691 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
692 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
693 tcg_gen_add_tl
, tcg_gen_add_tl
);
695 tcg_temp_free(temp2
);
696 tcg_temp_free_i64(temp64
);
700 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
701 TCGv r3
, uint32_t n
, uint32_t mode
)
703 TCGv temp
= tcg_const_i32(n
);
704 TCGv temp2
= tcg_temp_new();
705 TCGv_i64 temp64
= tcg_temp_new_i64();
708 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
711 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
714 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
717 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
720 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
721 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
722 tcg_gen_sub_tl
, tcg_gen_add_tl
);
724 tcg_temp_free(temp2
);
725 tcg_temp_free_i64(temp64
);
729 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
730 TCGv r3
, uint32_t n
, uint32_t mode
)
732 TCGv temp
= tcg_const_i32(n
);
733 TCGv_i64 temp64
= tcg_temp_new_i64();
734 TCGv_i64 temp64_2
= tcg_temp_new_i64();
735 TCGv_i64 temp64_3
= tcg_temp_new_i64();
738 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
741 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
744 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
747 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
750 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
751 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
752 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
753 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
754 tcg_gen_shli_i64(temp64
, temp64
, 16);
756 gen_add64_d(temp64_2
, temp64_3
, temp64
);
757 /* write back result */
758 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
761 tcg_temp_free_i64(temp64
);
762 tcg_temp_free_i64(temp64_2
);
763 tcg_temp_free_i64(temp64_3
);
766 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
769 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
770 TCGv r3
, uint32_t n
, uint32_t mode
)
772 TCGv temp
= tcg_const_i32(n
);
773 TCGv temp2
= tcg_temp_new();
774 TCGv temp3
= tcg_temp_new();
775 TCGv_i64 temp64
= tcg_temp_new_i64();
779 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
782 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
785 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
788 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
791 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
792 gen_adds(ret_low
, r1_low
, temp
);
793 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
794 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
795 gen_adds(ret_high
, r1_high
, temp2
);
797 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
798 /* combine av bits */
799 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
802 tcg_temp_free(temp2
);
803 tcg_temp_free(temp3
);
804 tcg_temp_free_i64(temp64
);
808 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
811 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
812 TCGv r3
, uint32_t n
, uint32_t mode
)
814 TCGv temp
= tcg_const_i32(n
);
815 TCGv temp2
= tcg_temp_new();
816 TCGv temp3
= tcg_temp_new();
817 TCGv_i64 temp64
= tcg_temp_new_i64();
821 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
824 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
827 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
830 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
833 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
834 gen_subs(ret_low
, r1_low
, temp
);
835 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
836 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
837 gen_adds(ret_high
, r1_high
, temp2
);
839 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
840 /* combine av bits */
841 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
844 tcg_temp_free(temp2
);
845 tcg_temp_free(temp3
);
846 tcg_temp_free_i64(temp64
);
851 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
852 TCGv r3
, uint32_t n
, uint32_t mode
)
854 TCGv temp
= tcg_const_i32(n
);
855 TCGv_i64 temp64
= tcg_temp_new_i64();
856 TCGv_i64 temp64_2
= tcg_temp_new_i64();
860 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
863 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
866 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
869 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
872 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
873 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
874 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
875 tcg_gen_shli_i64(temp64
, temp64
, 16);
876 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
878 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
879 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
882 tcg_temp_free_i64(temp64
);
883 tcg_temp_free_i64(temp64_2
);
888 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
889 TCGv r3
, uint32_t n
, uint32_t mode
)
891 TCGv temp
= tcg_const_i32(n
);
892 TCGv_i64 temp64
= tcg_temp_new_i64();
893 TCGv_i64 temp64_2
= tcg_temp_new_i64();
894 TCGv_i64 temp64_3
= tcg_temp_new_i64();
897 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
900 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
903 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
906 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
909 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
910 gen_add64_d(temp64_3
, temp64_2
, temp64
);
911 /* write back result */
912 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
915 tcg_temp_free_i64(temp64
);
916 tcg_temp_free_i64(temp64_2
);
917 tcg_temp_free_i64(temp64_3
);
921 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
922 TCGv r3
, uint32_t n
, uint32_t mode
)
924 TCGv temp
= tcg_const_i32(n
);
925 TCGv_i64 temp64
= tcg_temp_new_i64();
926 TCGv_i64 temp64_2
= tcg_temp_new_i64();
929 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
932 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
935 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
938 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
941 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
942 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
943 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
946 tcg_temp_free_i64(temp64
);
947 tcg_temp_free_i64(temp64_2
);
951 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
954 TCGv temp
= tcg_const_i32(n
);
955 TCGv_i64 temp64
= tcg_temp_new_i64();
958 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
961 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
964 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
967 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
970 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
973 tcg_temp_free_i64(temp64
);
977 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
979 TCGv temp
= tcg_temp_new();
980 TCGv temp2
= tcg_temp_new();
982 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
983 tcg_gen_shli_tl(temp
, r1
, 16);
984 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
987 tcg_temp_free(temp2
);
991 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
993 TCGv temp
= tcg_const_i32(n
);
994 TCGv temp2
= tcg_temp_new();
995 TCGv_i64 temp64
= tcg_temp_new_i64();
998 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1001 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1004 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1007 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1010 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1011 tcg_gen_shli_tl(temp
, r1
, 16);
1012 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
1014 tcg_temp_free(temp
);
1015 tcg_temp_free(temp2
);
1016 tcg_temp_free_i64(temp64
);
1021 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1022 uint32_t n
, uint32_t mode
)
1024 TCGv temp
= tcg_const_i32(n
);
1025 TCGv_i64 temp64
= tcg_temp_new_i64();
1028 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1031 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1034 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1037 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1040 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1042 tcg_temp_free(temp
);
1043 tcg_temp_free_i64(temp64
);
1047 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1049 TCGv temp
= tcg_temp_new();
1050 TCGv temp2
= tcg_temp_new();
1052 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1053 tcg_gen_shli_tl(temp
, r1
, 16);
1054 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1056 tcg_temp_free(temp
);
1057 tcg_temp_free(temp2
);
1061 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1063 TCGv temp
= tcg_const_i32(n
);
1064 TCGv temp2
= tcg_temp_new();
1065 TCGv_i64 temp64
= tcg_temp_new_i64();
1068 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1071 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1074 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1077 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1080 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1081 tcg_gen_shli_tl(temp
, r1
, 16);
1082 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
1084 tcg_temp_free(temp
);
1085 tcg_temp_free(temp2
);
1086 tcg_temp_free_i64(temp64
);
1090 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1092 TCGv temp
= tcg_const_i32(n
);
1093 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1094 tcg_temp_free(temp
);
1098 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1100 TCGv temp
= tcg_const_i32(n
);
1101 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1102 tcg_temp_free(temp
);
1106 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1107 uint32_t up_shift
, CPUTriCoreState
*env
)
1109 TCGv temp
= tcg_temp_new();
1110 TCGv temp2
= tcg_temp_new();
1111 TCGv temp3
= tcg_temp_new();
1112 TCGv_i64 t1
= tcg_temp_new_i64();
1113 TCGv_i64 t2
= tcg_temp_new_i64();
1114 TCGv_i64 t3
= tcg_temp_new_i64();
1116 tcg_gen_ext_i32_i64(t2
, arg2
);
1117 tcg_gen_ext_i32_i64(t3
, arg3
);
1119 tcg_gen_mul_i64(t2
, t2
, t3
);
1120 tcg_gen_shli_i64(t2
, t2
, n
);
1122 tcg_gen_ext_i32_i64(t1
, arg1
);
1123 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1125 tcg_gen_add_i64(t3
, t1
, t2
);
1126 tcg_gen_extrl_i64_i32(temp3
, t3
);
1128 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1129 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1130 tcg_gen_or_i64(t1
, t1
, t2
);
1131 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1132 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1133 /* We produce an overflow on the host if the mul before was
1134 (0x80000000 * 0x80000000) << 1). If this is the
1135 case, we negate the ovf. */
1137 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1138 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1139 tcg_gen_and_tl(temp
, temp
, temp2
);
1140 tcg_gen_shli_tl(temp
, temp
, 31);
1141 /* negate v bit, if special condition */
1142 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1145 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1146 /* Calc AV/SAV bits */
1147 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1148 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1150 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1151 /* write back result */
1152 tcg_gen_mov_tl(ret
, temp3
);
1154 tcg_temp_free(temp
);
1155 tcg_temp_free(temp2
);
1156 tcg_temp_free(temp3
);
1157 tcg_temp_free_i64(t1
);
1158 tcg_temp_free_i64(t2
);
1159 tcg_temp_free_i64(t3
);
1163 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1165 TCGv temp
= tcg_temp_new();
1166 TCGv temp2
= tcg_temp_new();
1168 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1169 } else { /* n is expected to be 1 */
1170 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1171 tcg_gen_shli_tl(temp
, temp
, 1);
1172 /* catch special case r1 = r2 = 0x8000 */
1173 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1174 tcg_gen_sub_tl(temp
, temp
, temp2
);
1176 gen_add_d(ret
, arg1
, temp
);
1178 tcg_temp_free(temp
);
1179 tcg_temp_free(temp2
);
1183 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1185 TCGv temp
= tcg_temp_new();
1186 TCGv temp2
= tcg_temp_new();
1188 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1189 } else { /* n is expected to be 1 */
1190 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1191 tcg_gen_shli_tl(temp
, temp
, 1);
1192 /* catch special case r1 = r2 = 0x8000 */
1193 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1194 tcg_gen_sub_tl(temp
, temp
, temp2
);
1196 gen_adds(ret
, arg1
, temp
);
1198 tcg_temp_free(temp
);
1199 tcg_temp_free(temp2
);
1203 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1204 TCGv arg3
, uint32_t n
)
1206 TCGv temp
= tcg_temp_new();
1207 TCGv temp2
= tcg_temp_new();
1208 TCGv_i64 t1
= tcg_temp_new_i64();
1209 TCGv_i64 t2
= tcg_temp_new_i64();
1210 TCGv_i64 t3
= tcg_temp_new_i64();
1213 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1214 } else { /* n is expected to be 1 */
1215 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1216 tcg_gen_shli_tl(temp
, temp
, 1);
1217 /* catch special case r1 = r2 = 0x8000 */
1218 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1219 tcg_gen_sub_tl(temp
, temp
, temp2
);
1221 tcg_gen_ext_i32_i64(t2
, temp
);
1222 tcg_gen_shli_i64(t2
, t2
, 16);
1223 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1224 gen_add64_d(t3
, t1
, t2
);
1225 /* write back result */
1226 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1228 tcg_temp_free_i64(t1
);
1229 tcg_temp_free_i64(t2
);
1230 tcg_temp_free_i64(t3
);
1231 tcg_temp_free(temp
);
1232 tcg_temp_free(temp2
);
1236 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1237 TCGv arg3
, uint32_t n
)
1239 TCGv temp
= tcg_temp_new();
1240 TCGv temp2
= tcg_temp_new();
1241 TCGv_i64 t1
= tcg_temp_new_i64();
1242 TCGv_i64 t2
= tcg_temp_new_i64();
1245 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1246 } else { /* n is expected to be 1 */
1247 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1248 tcg_gen_shli_tl(temp
, temp
, 1);
1249 /* catch special case r1 = r2 = 0x8000 */
1250 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1251 tcg_gen_sub_tl(temp
, temp
, temp2
);
1253 tcg_gen_ext_i32_i64(t2
, temp
);
1254 tcg_gen_shli_i64(t2
, t2
, 16);
1255 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1257 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1258 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1260 tcg_temp_free(temp
);
1261 tcg_temp_free(temp2
);
1262 tcg_temp_free_i64(t1
);
1263 tcg_temp_free_i64(t2
);
1267 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1268 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
1270 TCGv_i64 t1
= tcg_temp_new_i64();
1271 TCGv_i64 t2
= tcg_temp_new_i64();
1272 TCGv_i64 t3
= tcg_temp_new_i64();
1273 TCGv_i64 t4
= tcg_temp_new_i64();
1276 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1277 tcg_gen_ext_i32_i64(t2
, arg2
);
1278 tcg_gen_ext_i32_i64(t3
, arg3
);
1280 tcg_gen_mul_i64(t2
, t2
, t3
);
1282 tcg_gen_shli_i64(t2
, t2
, 1);
1284 tcg_gen_add_i64(t4
, t1
, t2
);
1286 tcg_gen_xor_i64(t3
, t4
, t1
);
1287 tcg_gen_xor_i64(t2
, t1
, t2
);
1288 tcg_gen_andc_i64(t3
, t3
, t2
);
1289 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1290 /* We produce an overflow on the host if the mul before was
1291 (0x80000000 * 0x80000000) << 1). If this is the
1292 case, we negate the ovf. */
1294 temp
= tcg_temp_new();
1295 temp2
= tcg_temp_new();
1296 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1297 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1298 tcg_gen_and_tl(temp
, temp
, temp2
);
1299 tcg_gen_shli_tl(temp
, temp
, 31);
1300 /* negate v bit, if special condition */
1301 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1303 tcg_temp_free(temp
);
1304 tcg_temp_free(temp2
);
1306 /* write back result */
1307 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1309 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1310 /* Calc AV/SAV bits */
1311 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1312 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1314 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1316 tcg_temp_free_i64(t1
);
1317 tcg_temp_free_i64(t2
);
1318 tcg_temp_free_i64(t3
);
1319 tcg_temp_free_i64(t4
);
1323 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1326 TCGv_i64 t1
= tcg_temp_new_i64();
1327 TCGv_i64 t2
= tcg_temp_new_i64();
1328 TCGv_i64 t3
= tcg_temp_new_i64();
1330 tcg_gen_ext_i32_i64(t1
, arg1
);
1331 tcg_gen_ext_i32_i64(t2
, arg2
);
1332 tcg_gen_ext_i32_i64(t3
, arg3
);
1334 tcg_gen_mul_i64(t2
, t2
, t3
);
1335 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1337 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1339 tcg_temp_free_i64(t1
);
1340 tcg_temp_free_i64(t2
);
1341 tcg_temp_free_i64(t3
);
1345 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1346 TCGv arg3
, uint32_t n
)
1348 TCGv_i64 r1
= tcg_temp_new_i64();
1349 TCGv temp
= tcg_const_i32(n
);
1351 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1352 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1353 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1355 tcg_temp_free_i64(r1
);
1356 tcg_temp_free(temp
);
1358 /* ret = r2 - (r1 * r3); */
1359 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1361 TCGv_i64 t1
= tcg_temp_new_i64();
1362 TCGv_i64 t2
= tcg_temp_new_i64();
1363 TCGv_i64 t3
= tcg_temp_new_i64();
1365 tcg_gen_ext_i32_i64(t1
, r1
);
1366 tcg_gen_ext_i32_i64(t2
, r2
);
1367 tcg_gen_ext_i32_i64(t3
, r3
);
1369 tcg_gen_mul_i64(t1
, t1
, t3
);
1370 tcg_gen_sub_i64(t1
, t2
, t1
);
1372 tcg_gen_extrl_i64_i32(ret
, t1
);
1375 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1376 /* result < -0x80000000 */
1377 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1378 tcg_gen_or_i64(t2
, t2
, t3
);
1379 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1380 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1383 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1384 /* Calc AV/SAV bits */
1385 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1386 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1388 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1390 tcg_temp_free_i64(t1
);
1391 tcg_temp_free_i64(t2
);
1392 tcg_temp_free_i64(t3
);
1395 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1397 TCGv temp
= tcg_const_i32(con
);
1398 gen_msub32_d(ret
, r1
, r2
, temp
);
1399 tcg_temp_free(temp
);
1403 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1406 TCGv t1
= tcg_temp_new();
1407 TCGv t2
= tcg_temp_new();
1408 TCGv t3
= tcg_temp_new();
1409 TCGv t4
= tcg_temp_new();
1411 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1412 /* only the sub can overflow */
1413 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1415 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1416 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1417 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1419 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1420 /* Calc AV/SAV bits */
1421 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1422 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1424 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1425 /* write back the result */
1426 tcg_gen_mov_tl(ret_low
, t3
);
1427 tcg_gen_mov_tl(ret_high
, t4
);
1436 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1439 TCGv temp
= tcg_const_i32(con
);
1440 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1441 tcg_temp_free(temp
);
1445 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1448 TCGv_i64 t1
= tcg_temp_new_i64();
1449 TCGv_i64 t2
= tcg_temp_new_i64();
1450 TCGv_i64 t3
= tcg_temp_new_i64();
1452 tcg_gen_extu_i32_i64(t1
, r1
);
1453 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1454 tcg_gen_extu_i32_i64(t3
, r3
);
1456 tcg_gen_mul_i64(t1
, t1
, t3
);
1457 tcg_gen_sub_i64(t3
, t2
, t1
);
1458 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1459 /* calc V bit, only the sub can overflow, if t1 > t2 */
1460 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1461 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1462 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1464 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1465 /* Calc AV/SAV bits */
1466 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1467 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1469 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1471 tcg_temp_free_i64(t1
);
1472 tcg_temp_free_i64(t2
);
1473 tcg_temp_free_i64(t3
);
1477 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1480 TCGv temp
= tcg_const_i32(con
);
1481 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1482 tcg_temp_free(temp
);
1485 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1487 TCGv temp
= tcg_const_i32(r2
);
1488 gen_add_d(ret
, r1
, temp
);
1489 tcg_temp_free(temp
);
1491 /* calculate the carry bit too */
1492 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1494 TCGv t0
= tcg_temp_new_i32();
1495 TCGv result
= tcg_temp_new_i32();
1497 tcg_gen_movi_tl(t0
, 0);
1498 /* Addition and set C/V/SV bits */
1499 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1501 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1502 tcg_gen_xor_tl(t0
, r1
, r2
);
1503 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1505 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1506 /* Calc AV/SAV bits */
1507 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1508 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1510 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1511 /* write back result */
1512 tcg_gen_mov_tl(ret
, result
);
1514 tcg_temp_free(result
);
1518 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1520 TCGv temp
= tcg_const_i32(con
);
1521 gen_add_CC(ret
, r1
, temp
);
1522 tcg_temp_free(temp
);
1525 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1527 TCGv carry
= tcg_temp_new_i32();
1528 TCGv t0
= tcg_temp_new_i32();
1529 TCGv result
= tcg_temp_new_i32();
1531 tcg_gen_movi_tl(t0
, 0);
1532 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1533 /* Addition, carry and set C/V/SV bits */
1534 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1535 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1537 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1538 tcg_gen_xor_tl(t0
, r1
, r2
);
1539 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1541 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1542 /* Calc AV/SAV bits */
1543 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1544 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1546 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1547 /* write back result */
1548 tcg_gen_mov_tl(ret
, result
);
1550 tcg_temp_free(result
);
1552 tcg_temp_free(carry
);
1555 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1557 TCGv temp
= tcg_const_i32(con
);
1558 gen_addc_CC(ret
, r1
, temp
);
1559 tcg_temp_free(temp
);
1562 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1565 TCGv temp
= tcg_temp_new();
1566 TCGv temp2
= tcg_temp_new();
1567 TCGv result
= tcg_temp_new();
1568 TCGv mask
= tcg_temp_new();
1569 TCGv t0
= tcg_const_i32(0);
1571 /* create mask for sticky bits */
1572 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1573 tcg_gen_shli_tl(mask
, mask
, 31);
1575 tcg_gen_add_tl(result
, r1
, r2
);
1577 tcg_gen_xor_tl(temp
, result
, r1
);
1578 tcg_gen_xor_tl(temp2
, r1
, r2
);
1579 tcg_gen_andc_tl(temp
, temp
, temp2
);
1580 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1582 tcg_gen_and_tl(temp
, temp
, mask
);
1583 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1585 tcg_gen_add_tl(temp
, result
, result
);
1586 tcg_gen_xor_tl(temp
, temp
, result
);
1587 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1589 tcg_gen_and_tl(temp
, temp
, mask
);
1590 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1591 /* write back result */
1592 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1595 tcg_temp_free(temp
);
1596 tcg_temp_free(temp2
);
1597 tcg_temp_free(result
);
1598 tcg_temp_free(mask
);
1601 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1604 TCGv temp
= tcg_const_i32(r2
);
1605 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1606 tcg_temp_free(temp
);
1609 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1611 TCGv temp
= tcg_temp_new_i32();
1612 TCGv result
= tcg_temp_new_i32();
1614 tcg_gen_sub_tl(result
, r1
, r2
);
1616 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1617 tcg_gen_xor_tl(temp
, r1
, r2
);
1618 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1620 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1622 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1623 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1625 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1626 /* write back result */
1627 tcg_gen_mov_tl(ret
, result
);
1629 tcg_temp_free(temp
);
1630 tcg_temp_free(result
);
1634 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1636 TCGv temp
= tcg_temp_new();
1637 TCGv_i64 t0
= tcg_temp_new_i64();
1638 TCGv_i64 t1
= tcg_temp_new_i64();
1639 TCGv_i64 result
= tcg_temp_new_i64();
1641 tcg_gen_sub_i64(result
, r1
, r2
);
1643 tcg_gen_xor_i64(t1
, result
, r1
);
1644 tcg_gen_xor_i64(t0
, r1
, r2
);
1645 tcg_gen_and_i64(t1
, t1
, t0
);
1646 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1648 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1649 /* calc AV/SAV bits */
1650 tcg_gen_extrh_i64_i32(temp
, result
);
1651 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1652 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1654 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1655 /* write back result */
1656 tcg_gen_mov_i64(ret
, result
);
1658 tcg_temp_free(temp
);
1659 tcg_temp_free_i64(result
);
1660 tcg_temp_free_i64(t0
);
1661 tcg_temp_free_i64(t1
);
1664 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1666 TCGv result
= tcg_temp_new();
1667 TCGv temp
= tcg_temp_new();
1669 tcg_gen_sub_tl(result
, r1
, r2
);
1671 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1673 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1674 tcg_gen_xor_tl(temp
, r1
, r2
);
1675 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1677 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1679 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1680 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1682 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1683 /* write back result */
1684 tcg_gen_mov_tl(ret
, result
);
1686 tcg_temp_free(result
);
1687 tcg_temp_free(temp
);
1690 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1692 TCGv temp
= tcg_temp_new();
1693 tcg_gen_not_tl(temp
, r2
);
1694 gen_addc_CC(ret
, r1
, temp
);
1695 tcg_temp_free(temp
);
1698 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1701 TCGv temp
= tcg_temp_new();
1702 TCGv temp2
= tcg_temp_new();
1703 TCGv result
= tcg_temp_new();
1704 TCGv mask
= tcg_temp_new();
1705 TCGv t0
= tcg_const_i32(0);
1707 /* create mask for sticky bits */
1708 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1709 tcg_gen_shli_tl(mask
, mask
, 31);
1711 tcg_gen_sub_tl(result
, r1
, r2
);
1713 tcg_gen_xor_tl(temp
, result
, r1
);
1714 tcg_gen_xor_tl(temp2
, r1
, r2
);
1715 tcg_gen_and_tl(temp
, temp
, temp2
);
1716 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1718 tcg_gen_and_tl(temp
, temp
, mask
);
1719 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1721 tcg_gen_add_tl(temp
, result
, result
);
1722 tcg_gen_xor_tl(temp
, temp
, result
);
1723 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1725 tcg_gen_and_tl(temp
, temp
, mask
);
1726 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1727 /* write back result */
1728 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1731 tcg_temp_free(temp
);
1732 tcg_temp_free(temp2
);
1733 tcg_temp_free(result
);
1734 tcg_temp_free(mask
);
1738 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1739 TCGv r3
, uint32_t n
, uint32_t mode
)
1741 TCGv temp
= tcg_const_i32(n
);
1742 TCGv temp2
= tcg_temp_new();
1743 TCGv_i64 temp64
= tcg_temp_new_i64();
1746 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1749 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1752 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1755 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1758 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1759 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1760 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1761 tcg_temp_free(temp
);
1762 tcg_temp_free(temp2
);
1763 tcg_temp_free_i64(temp64
);
1767 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1768 TCGv r3
, uint32_t n
, uint32_t mode
)
1770 TCGv temp
= tcg_const_i32(n
);
1771 TCGv temp2
= tcg_temp_new();
1772 TCGv temp3
= tcg_temp_new();
1773 TCGv_i64 temp64
= tcg_temp_new_i64();
1777 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1780 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1783 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1786 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1789 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1790 gen_subs(ret_low
, r1_low
, temp
);
1791 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1792 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1793 gen_subs(ret_high
, r1_high
, temp2
);
1794 /* combine v bits */
1795 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1796 /* combine av bits */
1797 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1799 tcg_temp_free(temp
);
1800 tcg_temp_free(temp2
);
1801 tcg_temp_free(temp3
);
1802 tcg_temp_free_i64(temp64
);
1806 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1807 TCGv r3
, uint32_t n
, uint32_t mode
)
1809 TCGv temp
= tcg_const_i32(n
);
1810 TCGv_i64 temp64
= tcg_temp_new_i64();
1811 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1812 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1815 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1818 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1821 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1824 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1827 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1828 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1829 /* write back result */
1830 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1832 tcg_temp_free(temp
);
1833 tcg_temp_free_i64(temp64
);
1834 tcg_temp_free_i64(temp64_2
);
1835 tcg_temp_free_i64(temp64_3
);
1839 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1840 TCGv r3
, uint32_t n
, uint32_t mode
)
1842 TCGv temp
= tcg_const_i32(n
);
1843 TCGv_i64 temp64
= tcg_temp_new_i64();
1844 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1847 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1850 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1853 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1856 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1859 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1860 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1861 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1863 tcg_temp_free(temp
);
1864 tcg_temp_free_i64(temp64
);
1865 tcg_temp_free_i64(temp64_2
);
1869 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1872 TCGv temp
= tcg_const_i32(n
);
1873 TCGv_i64 temp64
= tcg_temp_new_i64();
1876 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1879 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1882 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1885 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1888 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1890 tcg_temp_free(temp
);
1891 tcg_temp_free_i64(temp64
);
1895 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1897 TCGv temp
= tcg_temp_new();
1898 TCGv temp2
= tcg_temp_new();
1900 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1901 tcg_gen_shli_tl(temp
, r1
, 16);
1902 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1904 tcg_temp_free(temp
);
1905 tcg_temp_free(temp2
);
1909 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1910 uint32_t n
, uint32_t mode
)
1912 TCGv temp
= tcg_const_i32(n
);
1913 TCGv_i64 temp64
= tcg_temp_new_i64();
1916 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1919 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1922 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1925 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1928 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1930 tcg_temp_free(temp
);
1931 tcg_temp_free_i64(temp64
);
1935 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1937 TCGv temp
= tcg_temp_new();
1938 TCGv temp2
= tcg_temp_new();
1940 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1941 tcg_gen_shli_tl(temp
, r1
, 16);
1942 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1944 tcg_temp_free(temp
);
1945 tcg_temp_free(temp2
);
1949 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1951 TCGv temp
= tcg_const_i32(n
);
1952 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1953 tcg_temp_free(temp
);
1957 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1959 TCGv temp
= tcg_const_i32(n
);
1960 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1961 tcg_temp_free(temp
);
1965 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1966 uint32_t up_shift
, CPUTriCoreState
*env
)
1968 TCGv temp
= tcg_temp_new();
1969 TCGv temp2
= tcg_temp_new();
1970 TCGv temp3
= tcg_temp_new();
1971 TCGv_i64 t1
= tcg_temp_new_i64();
1972 TCGv_i64 t2
= tcg_temp_new_i64();
1973 TCGv_i64 t3
= tcg_temp_new_i64();
1974 TCGv_i64 t4
= tcg_temp_new_i64();
1976 tcg_gen_ext_i32_i64(t2
, arg2
);
1977 tcg_gen_ext_i32_i64(t3
, arg3
);
1979 tcg_gen_mul_i64(t2
, t2
, t3
);
1981 tcg_gen_ext_i32_i64(t1
, arg1
);
1982 /* if we shift part of the fraction out, we need to round up */
1983 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1984 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1985 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1986 tcg_gen_add_i64(t2
, t2
, t4
);
1988 tcg_gen_sub_i64(t3
, t1
, t2
);
1989 tcg_gen_extrl_i64_i32(temp3
, t3
);
1991 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1992 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1993 tcg_gen_or_i64(t1
, t1
, t2
);
1994 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1995 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1997 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1998 /* Calc AV/SAV bits */
1999 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
2000 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
2002 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2003 /* write back result */
2004 tcg_gen_mov_tl(ret
, temp3
);
2006 tcg_temp_free(temp
);
2007 tcg_temp_free(temp2
);
2008 tcg_temp_free(temp3
);
2009 tcg_temp_free_i64(t1
);
2010 tcg_temp_free_i64(t2
);
2011 tcg_temp_free_i64(t3
);
2012 tcg_temp_free_i64(t4
);
2016 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2018 TCGv temp
= tcg_temp_new();
2019 TCGv temp2
= tcg_temp_new();
2021 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2022 } else { /* n is expected to be 1 */
2023 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2024 tcg_gen_shli_tl(temp
, temp
, 1);
2025 /* catch special case r1 = r2 = 0x8000 */
2026 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2027 tcg_gen_sub_tl(temp
, temp
, temp2
);
2029 gen_sub_d(ret
, arg1
, temp
);
2031 tcg_temp_free(temp
);
2032 tcg_temp_free(temp2
);
2036 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2038 TCGv temp
= tcg_temp_new();
2039 TCGv temp2
= tcg_temp_new();
2041 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2042 } else { /* n is expected to be 1 */
2043 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2044 tcg_gen_shli_tl(temp
, temp
, 1);
2045 /* catch special case r1 = r2 = 0x8000 */
2046 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2047 tcg_gen_sub_tl(temp
, temp
, temp2
);
2049 gen_subs(ret
, arg1
, temp
);
2051 tcg_temp_free(temp
);
2052 tcg_temp_free(temp2
);
2056 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2057 TCGv arg3
, uint32_t n
)
2059 TCGv temp
= tcg_temp_new();
2060 TCGv temp2
= tcg_temp_new();
2061 TCGv_i64 t1
= tcg_temp_new_i64();
2062 TCGv_i64 t2
= tcg_temp_new_i64();
2063 TCGv_i64 t3
= tcg_temp_new_i64();
2066 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2067 } else { /* n is expected to be 1 */
2068 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2069 tcg_gen_shli_tl(temp
, temp
, 1);
2070 /* catch special case r1 = r2 = 0x8000 */
2071 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2072 tcg_gen_sub_tl(temp
, temp
, temp2
);
2074 tcg_gen_ext_i32_i64(t2
, temp
);
2075 tcg_gen_shli_i64(t2
, t2
, 16);
2076 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2077 gen_sub64_d(t3
, t1
, t2
);
2078 /* write back result */
2079 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
2081 tcg_temp_free_i64(t1
);
2082 tcg_temp_free_i64(t2
);
2083 tcg_temp_free_i64(t3
);
2084 tcg_temp_free(temp
);
2085 tcg_temp_free(temp2
);
2089 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2090 TCGv arg3
, uint32_t n
)
2092 TCGv temp
= tcg_temp_new();
2093 TCGv temp2
= tcg_temp_new();
2094 TCGv_i64 t1
= tcg_temp_new_i64();
2095 TCGv_i64 t2
= tcg_temp_new_i64();
2098 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2099 } else { /* n is expected to be 1 */
2100 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2101 tcg_gen_shli_tl(temp
, temp
, 1);
2102 /* catch special case r1 = r2 = 0x8000 */
2103 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2104 tcg_gen_sub_tl(temp
, temp
, temp2
);
2106 tcg_gen_ext_i32_i64(t2
, temp
);
2107 tcg_gen_shli_i64(t2
, t2
, 16);
2108 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2110 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
2111 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
2113 tcg_temp_free(temp
);
2114 tcg_temp_free(temp2
);
2115 tcg_temp_free_i64(t1
);
2116 tcg_temp_free_i64(t2
);
2120 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2121 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
2123 TCGv_i64 t1
= tcg_temp_new_i64();
2124 TCGv_i64 t2
= tcg_temp_new_i64();
2125 TCGv_i64 t3
= tcg_temp_new_i64();
2126 TCGv_i64 t4
= tcg_temp_new_i64();
2129 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2130 tcg_gen_ext_i32_i64(t2
, arg2
);
2131 tcg_gen_ext_i32_i64(t3
, arg3
);
2133 tcg_gen_mul_i64(t2
, t2
, t3
);
2135 tcg_gen_shli_i64(t2
, t2
, 1);
2137 tcg_gen_sub_i64(t4
, t1
, t2
);
2139 tcg_gen_xor_i64(t3
, t4
, t1
);
2140 tcg_gen_xor_i64(t2
, t1
, t2
);
2141 tcg_gen_and_i64(t3
, t3
, t2
);
2142 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
2143 /* We produce an overflow on the host if the mul before was
2144 (0x80000000 * 0x80000000) << 1). If this is the
2145 case, we negate the ovf. */
2147 temp
= tcg_temp_new();
2148 temp2
= tcg_temp_new();
2149 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
2150 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
2151 tcg_gen_and_tl(temp
, temp
, temp2
);
2152 tcg_gen_shli_tl(temp
, temp
, 31);
2153 /* negate v bit, if special condition */
2154 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2156 tcg_temp_free(temp
);
2157 tcg_temp_free(temp2
);
2159 /* write back result */
2160 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
2162 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2163 /* Calc AV/SAV bits */
2164 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2165 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2167 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2169 tcg_temp_free_i64(t1
);
2170 tcg_temp_free_i64(t2
);
2171 tcg_temp_free_i64(t3
);
2172 tcg_temp_free_i64(t4
);
2176 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
2179 TCGv_i64 t1
= tcg_temp_new_i64();
2180 TCGv_i64 t2
= tcg_temp_new_i64();
2181 TCGv_i64 t3
= tcg_temp_new_i64();
2182 TCGv_i64 t4
= tcg_temp_new_i64();
2184 tcg_gen_ext_i32_i64(t1
, arg1
);
2185 tcg_gen_ext_i32_i64(t2
, arg2
);
2186 tcg_gen_ext_i32_i64(t3
, arg3
);
2188 tcg_gen_mul_i64(t2
, t2
, t3
);
2189 /* if we shift part of the fraction out, we need to round up */
2190 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
2191 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
2192 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
2193 tcg_gen_add_i64(t3
, t3
, t4
);
2195 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
2197 tcg_temp_free_i64(t1
);
2198 tcg_temp_free_i64(t2
);
2199 tcg_temp_free_i64(t3
);
2200 tcg_temp_free_i64(t4
);
2204 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2205 TCGv arg3
, uint32_t n
)
2207 TCGv_i64 r1
= tcg_temp_new_i64();
2208 TCGv temp
= tcg_const_i32(n
);
2210 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
2211 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
2212 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
2214 tcg_temp_free_i64(r1
);
2215 tcg_temp_free(temp
);
2219 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2220 TCGv r3
, uint32_t n
, uint32_t mode
)
2222 TCGv temp
= tcg_const_i32(n
);
2223 TCGv temp2
= tcg_temp_new();
2224 TCGv_i64 temp64
= tcg_temp_new_i64();
2227 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2230 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2233 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2236 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2239 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2240 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
2241 tcg_gen_add_tl
, tcg_gen_sub_tl
);
2242 tcg_temp_free(temp
);
2243 tcg_temp_free(temp2
);
2244 tcg_temp_free_i64(temp64
);
2248 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2249 TCGv r3
, uint32_t n
, uint32_t mode
)
2251 TCGv temp
= tcg_const_i32(n
);
2252 TCGv_i64 temp64
= tcg_temp_new_i64();
2253 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2254 TCGv_i64 temp64_3
= tcg_temp_new_i64();
2257 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2260 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2263 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2266 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2269 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
2270 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2271 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2272 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2273 tcg_gen_shli_i64(temp64
, temp64
, 16);
2275 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
2276 /* write back result */
2277 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
2279 tcg_temp_free(temp
);
2280 tcg_temp_free_i64(temp64
);
2281 tcg_temp_free_i64(temp64_2
);
2282 tcg_temp_free_i64(temp64_3
);
2286 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2288 TCGv temp
= tcg_const_i32(n
);
2289 TCGv temp2
= tcg_temp_new();
2290 TCGv_i64 temp64
= tcg_temp_new_i64();
2293 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2296 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2299 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2302 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2305 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2306 tcg_gen_shli_tl(temp
, r1
, 16);
2307 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2309 tcg_temp_free(temp
);
2310 tcg_temp_free(temp2
);
2311 tcg_temp_free_i64(temp64
);
2315 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2316 TCGv r3
, uint32_t n
, uint32_t mode
)
2318 TCGv temp
= tcg_const_i32(n
);
2319 TCGv temp2
= tcg_temp_new();
2320 TCGv temp3
= tcg_temp_new();
2321 TCGv_i64 temp64
= tcg_temp_new_i64();
2325 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2328 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2331 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2334 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2337 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2338 gen_adds(ret_low
, r1_low
, temp
);
2339 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2340 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2341 gen_subs(ret_high
, r1_high
, temp2
);
2342 /* combine v bits */
2343 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2344 /* combine av bits */
2345 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2347 tcg_temp_free(temp
);
2348 tcg_temp_free(temp2
);
2349 tcg_temp_free(temp3
);
2350 tcg_temp_free_i64(temp64
);
/*
 * MSUBADMS.H: half-word multiply (combination selected by mode), form
 * (high_word - low_word) << 16 from the product pair, and subtract it,
 * with 64-bit signed saturation, from the 64-bit value r1_high:r1_low.
 */
static inline void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    /* temp64 = half-word product pair selected by mode */
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    /* helper performs the saturating 64-bit subtract and sets PSW bits */
    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
/*
 * MSUBADRS.H: half-word multiply (combination selected by mode), then a
 * rounding, saturating sub/add of the product pair against the two
 * half-words of r1 (split into temp/temp2), done in the helper.
 */
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    /* temp64 = half-word product pair selected by mode */
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp2 = upper half-word of r1, temp = lower half-word shifted up */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
/*
 * ABS: ret = |r1|, updating PSW.V/SV/AV/SAV.
 * Overflow can only happen for r1 == 0x80000000 (|INT_MIN| not representable).
 */
static inline void gen_abs(TCGv ret, TCGv r1)
{
    TCGv temp = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    tcg_gen_neg_tl(temp, r1);
    /* ret = (r1 >= 0) ? r1 : -r1 */
    tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit (sticky overflow) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV bit: bit31 ^ bit30 of the result */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit (sticky advance overflow) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(t0);
}
/*
 * ABSDIF: ret = |r1 - r2|, updating PSW.V/SV/AV/SAV.
 * The V bit is selected from whichever subtraction was actually taken.
 */
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    /* result = (r1 > r2) ? r1 - r2 : r2 - r1 */
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);

    /* calc V bit of the chosen subtraction: (res ^ a) & (a ^ b) */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
2469 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2471 TCGv temp
= tcg_const_i32(con
);
2472 gen_absdif(ret
, r1
, temp
);
2473 tcg_temp_free(temp
);
2476 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2478 TCGv temp
= tcg_const_i32(con
);
2479 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2480 tcg_temp_free(temp
);
2483 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2485 TCGv high
= tcg_temp_new();
2486 TCGv low
= tcg_temp_new();
2488 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2489 tcg_gen_mov_tl(ret
, low
);
2491 tcg_gen_sari_tl(low
, low
, 31);
2492 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2493 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2495 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2497 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2498 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2500 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2502 tcg_temp_free(high
);
2506 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2508 TCGv temp
= tcg_const_i32(con
);
2509 gen_mul_i32s(ret
, r1
, temp
);
2510 tcg_temp_free(temp
);
/*
 * MUL (64-bit signed result): ret_high:ret_low = r1 * r2.
 * A 32x32->64 multiply cannot overflow, so V is cleared unconditionally.
 */
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
                                TCGv r2)
{
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    /* clear v bit: 64-bit result never overflows */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV bit from the high word of the result */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2527 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2530 TCGv temp
= tcg_const_i32(con
);
2531 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2532 tcg_temp_free(temp
);
/*
 * MUL.U (64-bit unsigned result): ret_high:ret_low = r1 * r2.
 * A 32x32->64 multiply cannot overflow, so V is cleared unconditionally.
 */
static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
                                TCGv r2)
{
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    /* clear v bit: 64-bit result never overflows */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV bit from the high word of the result */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2549 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2552 TCGv temp
= tcg_const_i32(con
);
2553 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2554 tcg_temp_free(temp
);
2557 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2559 TCGv temp
= tcg_const_i32(con
);
2560 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2561 tcg_temp_free(temp
);
2564 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2566 TCGv temp
= tcg_const_i32(con
);
2567 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2568 tcg_temp_free(temp
);
2570 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2571 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2573 TCGv temp
= tcg_const_i32(con
);
2574 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2575 tcg_temp_free(temp
);
2578 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2580 TCGv temp
= tcg_const_i32(con
);
2581 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2582 tcg_temp_free(temp
);
/*
 * MUL.Q: Q-format fixed-point multiply.
 *   n        - 0: no fractional doubling; 1: result is shifted left by one
 *   up_shift - how far the 64-bit product is shifted down (0, 16 or 32)
 * Writes the (possibly 64-bit) result to rh:rl and updates V/SV/AV/SAV.
 */
static inline void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);

            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        /* reset v bit: cannot overflow without the doubling shift */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);

        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);

        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* calc av overflow bit */
        if (up_shift == 0) {
            tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
            tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
        } else {
            tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
            tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
        }
    }
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp_64);
    tcg_temp_free_i64(temp2_64);
}
/*
 * MUL.Q with 16-bit result: Q-format multiply; for n == 1 the result is
 * doubled, with the single overflowing case r1 = r2 = 0x8000 clamped.
 */
static inline void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000: clamp 0x80000000 -> 0x7fffffff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
}
/*
 * MULR.Q: rounding Q-format multiply; adds the rounding constant 0x8000,
 * handles the r1 = r2 = 0x8000 special case, and keeps only the upper
 * half-word of the result.
 */
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_addi_tl(ret, ret, 0x8000);
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        tcg_gen_addi_tl(ret, ret, 0x8000);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);

    tcg_temp_free(temp);
}
/*
 * MADDS (64-bit): ret pair = ssov64(r2_high:r2_low + r1 * r3);
 * saturation and PSW updates happen in the helper.
 */
static inline void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}
2710 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2713 TCGv temp
= tcg_const_i32(con
);
2714 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2715 tcg_temp_free(temp
);
/*
 * MADDS.U (64-bit): ret pair = suov64(r2_high:r2_low + r1 * r3);
 * unsigned saturation and PSW updates happen in the helper.
 */
static inline void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}
2730 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2733 TCGv temp
= tcg_const_i32(con
);
2734 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2735 tcg_temp_free(temp
);
2738 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2740 TCGv temp
= tcg_const_i32(con
);
2741 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2742 tcg_temp_free(temp
);
2745 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2747 TCGv temp
= tcg_const_i32(con
);
2748 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2749 tcg_temp_free(temp
);
/*
 * MSUBS (64-bit): ret pair = ssov64(r2_high:r2_low - r1 * r3);
 * saturation and PSW updates happen in the helper.
 */
static inline void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}
2764 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2767 TCGv temp
= tcg_const_i32(con
);
2768 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2769 tcg_temp_free(temp
);
/*
 * MSUBS.U (64-bit): ret pair = suov64(r2_high:r2_low - r1 * r3);
 * unsigned saturation and PSW updates happen in the helper.
 */
static inline void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}
2784 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2787 TCGv temp
= tcg_const_i32(con
);
2788 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2789 tcg_temp_free(temp
);
/* Clamp arg to the signed range [low, up] and write the result to ret. */
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    TCGv sat_neg = tcg_const_i32(low);
    TCGv temp = tcg_const_i32(up);

    /* sat_neg = (arg < low ) ? low : arg; */
    tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);

    /* ret = (sat_neg > up ) ? up : sat_neg; */
    tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);

    tcg_temp_free(sat_neg);
    tcg_temp_free(temp);
}
/* Clamp arg (treated as unsigned) to the range [0, up]. */
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    TCGv temp = tcg_const_i32(up);
    /* sat_neg = (arg > up ) ? up : arg; */
    tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
    tcg_temp_free(temp);
}
2815 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2817 if (shift_count
== -32) {
2818 tcg_gen_movi_tl(ret
, 0);
2819 } else if (shift_count
>= 0) {
2820 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2822 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
/*
 * SH.H: shift each of the two half-words of r1 independently by
 * shiftcount (logical; negative = right). -16 zeroes both lanes.
 */
static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv temp_low, temp_high;

    if (shiftcount == -16) {
        tcg_gen_movi_tl(ret, 0);
    } else {
        temp_high = tcg_temp_new();
        temp_low = tcg_temp_new();

        /* split r1 into its half-word lanes, shift each via gen_shi() */
        tcg_gen_andi_tl(temp_low, r1, 0xffff);
        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
        gen_shi(temp_low, temp_low, shiftcount);
        gen_shi(ret, temp_high, shiftcount);
        /* recombine: low lane deposited into bits [15:0] */
        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);

        tcg_temp_free(temp_low);
        tcg_temp_free(temp_high);
    }
}
/*
 * SHA with immediate count: arithmetic shift with PSW.C set to the bits
 * shifted out and PSW.V set when a left shift loses significance.
 * AV/SAV are always recomputed from the result.
 */
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* NOTE(review): t_0 appears unused in the visible code — verify
       against the full file before removing. */
    TCGv t_0 = tcg_const_i32(0);

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        /* set PSW.C to the whole operand (any set bit was shifted out) */
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        /* clear PSW.V: arithmetic right shift cannot overflow */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);

        /* calc carry: the shift_count topmost bits of r1 */
        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits: overflow iff r1 outside [t_min, t_max] */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        /* do the shift */
        tcg_gen_shli_tl(ret, r1, shift_count);

        tcg_temp_free(t_max);
        tcg_temp_free(t_min);
    } else {
        /* clear PSW.V */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* calc carry: the -shift_count lowest bits of r1 */
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* do the shift */
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(t_0);
}
/* SHAS: saturating arithmetic shift; saturation and PSW via helper. */
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sha_ssov(ret, cpu_env, r1, r2);
}
2911 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2913 TCGv temp
= tcg_const_i32(con
);
2914 gen_shas(ret
, r1
, temp
);
2915 tcg_temp_free(temp
);
/*
 * SHA.H: arithmetic shift of each half-word lane of r1 by shift_count
 * (negative = right, sign-extending each lane independently).
 */
static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
{
    TCGv low, high;

    if (shift_count == 0) {
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count > 0) {
        low = tcg_temp_new();
        high = tcg_temp_new();

        /* left shift: lanes cannot bleed into each other after masking */
        tcg_gen_andi_tl(high, r1, 0xffff0000);
        tcg_gen_shli_tl(low, r1, shift_count);
        tcg_gen_shli_tl(ret, high, shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);

        tcg_temp_free(low);
        tcg_temp_free(high);
    } else {
        low = tcg_temp_new();
        high = tcg_temp_new();

        /* right shift: sign-extend the low lane before shifting it */
        tcg_gen_ext16s_tl(low, r1);
        tcg_gen_sari_tl(low, low, -shift_count);
        tcg_gen_sari_tl(ret, r1, -shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);

        tcg_temp_free(low);
        tcg_temp_free(high);
    }
}
2950 /* ret = {ret[30:0], (r1 cond r2)}; */
2951 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2953 TCGv temp
= tcg_temp_new();
2954 TCGv temp2
= tcg_temp_new();
2956 tcg_gen_shli_tl(temp
, ret
, 1);
2957 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2958 tcg_gen_or_tl(ret
, temp
, temp2
);
2960 tcg_temp_free(temp
);
2961 tcg_temp_free(temp2
);
2964 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2966 TCGv temp
= tcg_const_i32(con
);
2967 gen_sh_cond(cond
, ret
, r1
, temp
);
2968 tcg_temp_free(temp
);
/* ADDS: ret = ssov(r1 + r2); saturation and PSW flags via helper. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}
2976 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2978 TCGv temp
= tcg_const_i32(con
);
2979 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2980 tcg_temp_free(temp
);
2983 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2985 TCGv temp
= tcg_const_i32(con
);
2986 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2987 tcg_temp_free(temp
);
/* SUBS: ret = ssov(r1 - r2); saturation and PSW flags via helper. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}
/* SUBS.U: ret = suov(r1 - r2); saturation and PSW flags via helper. */
static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_suov(ret, cpu_env, r1, r2);
}
/*
 * Two-level bit operation:
 *   ret[0] = (r1[pos1] op1 r2[pos2]) op2 ret[0];  ret[31:1] unchanged.
 * Only bit 0 of the intermediate values matters; deposit masks the rest.
 */
static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv),
                               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    /* move the selected bits into bit 0 */
    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(temp1, temp1, temp2);
    /* op2 combines with the accumulator's current bit 0 (in ret) */
    (*op2)(temp1, ret, temp1);

    /* write only bit 0 back */
    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}
/* ret = r1[pos1] op1 r2[pos2]; result masked to a single bit. */
static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    /* move the selected bits into bit 0 */
    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(ret, temp1, temp2);

    /* keep only bit 0 of the combined result */
    tcg_gen_andi_tl(ret, ret, 0x1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}
/*
 * Accumulating compare: ret[0] = ret[0] op (r1 cond r2); ret[31:1] kept.
 * Used for AND.cond / OR.cond style instructions.
 */
static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
                                         void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* temp = (arg1 cond arg2 )*/
    tcg_gen_setcond_tl(cond, temp, r1, r2);
    /* temp2 = ret[0] */
    tcg_gen_andi_tl(temp2, ret, 0x1);
    /* temp = temp insn temp2 */
    (*op)(temp, temp, temp2);
    /* ret = {ret[31:1], temp} */
    tcg_gen_deposit_tl(ret, ret, temp, 0, 1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
3062 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
3063 void(*op
)(TCGv
, TCGv
, TCGv
))
3065 TCGv temp
= tcg_const_i32(con
);
3066 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
3067 tcg_temp_free(temp
);
/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; setcond yields 0/1,
   negation turns 1 into an all-ones mask. */
static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
{
    tcg_gen_setcond_tl(cond, ret, r1, r2);
    tcg_gen_neg_tl(ret, ret);
}
/*
 * EQANY.B: ret = 1 iff any byte of r1 equals the corresponding byte of
 * con. Each lane is compared in place (masked, so no cross-lane shifts
 * are needed), then the four 0/1 results are ORed together.
 */
static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv b0 = tcg_temp_new();
    TCGv b1 = tcg_temp_new();
    TCGv b2 = tcg_temp_new();
    TCGv b3 = tcg_temp_new();

    /* byte 0 */
    tcg_gen_andi_tl(b0, r1, 0xff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);

    /* byte 1 */
    tcg_gen_andi_tl(b1, r1, 0xff00);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);

    /* byte 2 */
    tcg_gen_andi_tl(b2, r1, 0xff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);

    /* byte 3 */
    tcg_gen_andi_tl(b3, r1, 0xff000000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);

    /* combine them */
    tcg_gen_or_tl(ret, b0, b1);
    tcg_gen_or_tl(ret, ret, b2);
    tcg_gen_or_tl(ret, ret, b3);

    tcg_temp_free(b0);
    tcg_temp_free(b1);
    tcg_temp_free(b2);
    tcg_temp_free(b3);
}
/*
 * EQANY.H: ret = 1 iff either half-word of r1 equals the corresponding
 * half-word of con; same masked-lane technique as gen_eqany_bi().
 */
static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();

    /* halfword 0 */
    tcg_gen_andi_tl(h0, r1, 0xffff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);

    /* halfword 1 */
    tcg_gen_andi_tl(h1, r1, 0xffff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);

    /* combine them */
    tcg_gen_or_tl(ret, h0, h1);

    tcg_temp_free(h0);
    tcg_temp_free(h1);
}
/* mask = ((1 << width) -1) << pos;
   ret = (r1 & ~mask) | (r2 << pos) & mask); — insert a "width"-bit field
   from r2 into r1 at bit position pos (both runtime values). */
static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width,
                              TCGv pos)
{
    TCGv mask = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* build the field mask at runtime */
    tcg_gen_movi_tl(mask, 1);
    tcg_gen_shl_tl(mask, mask, width);
    tcg_gen_subi_tl(mask, mask, 1);
    tcg_gen_shl_tl(mask, mask, pos);

    /* temp = masked source field, temp2 = r1 with the field cleared */
    tcg_gen_shl_tl(temp, r2, pos);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_andc_tl(temp2, r1, mask);
    tcg_gen_or_tl(ret, temp, temp2);

    tcg_temp_free(mask);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/* BSPLIT: split r1 into even/odd bit streams via helper; 64-bit helper
   result is unpacked into the rl/rh register pair. */
static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_bsplit(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
/* UNPACK: helper computes the unpacked value; 64-bit result is split
   into the rl/rh register pair. */
static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_unpack(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
3174 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3176 TCGv_i64 ret
= tcg_temp_new_i64();
3178 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3179 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
3181 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
3183 tcg_gen_extr_i64_i32(rl
, rh
, ret
);