scsi: esp: check buffer length before reading scsi command
[qemu.git] / target-tricore / translate.c
1 /*
2 * TriCore emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30
31 #include "tricore-opcodes.h"
32 #include "exec/log.h"
33
/*
 * TCG registers
 */
static TCGv cpu_PC;    /* program counter */
static TCGv cpu_PCXI;  /* previous context information */
static TCGv cpu_PSW;   /* program status word (flag bits are cached below) */
static TCGv cpu_ICR;   /* interrupt control register */
/* GPR registers */
static TCGv cpu_gpr_a[16];  /* address registers a0-a15 (a10 is SP) */
static TCGv cpu_gpr_d[16];  /* data registers d0-d15 */
/* PSW Flag cache */
static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;    /* kept in bit 31, like the other cached flags */
static TCGv cpu_PSW_SV;   /* sticky overflow */
static TCGv cpu_PSW_AV;   /* advance overflow */
static TCGv cpu_PSW_SAV;  /* sticky advance overflow */
/* CPU env */
static TCGv_env cpu_env;
52
53 #include "exec/gen-icount.h"
54
/* Names of the address registers for the disassembler/log output;
   a10 is printed as "sp" since it serves as the stack pointer. */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
60
/* Names of the data registers for the disassembler/log output. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
66
/* Per-translation-block state threaded through the decode functions. */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;  /* current / saved / following insn address */
    uint32_t opcode;                     /* raw instruction word being decoded */
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;                         /* MMU index for qemu_ld/st ops */
    uint32_t hflags, saved_hflags;       /* copy of env TB flags (e.g. privilege level) */
    int bstate;                          /* one of the BS_* values below */
} DisasContext;
77
/* Translation-loop termination state (DisasContext.bstate). */
enum {

    BS_NONE   = 0,  /* keep translating */
    BS_STOP   = 1,  /* stop: state changed in a way that invalidates the TB */
    BS_BRANCH = 2,  /* stop: branch was emitted */
    BS_EXCP   = 3,  /* stop: exception was raised */
};
85
/* Half-word selection modes for the packed multiply(-accumulate) insns:
   first letter selects the half of one operand, second of the other
   (L = lower 16 bits, U = upper 16 bits). */
enum {
    MODE_LL = 0,
    MODE_LU = 1,
    MODE_UL = 2,
    MODE_UU = 3,
};
92
93 void tricore_cpu_dump_state(CPUState *cs, FILE *f,
94 fprintf_function cpu_fprintf, int flags)
95 {
96 TriCoreCPU *cpu = TRICORE_CPU(cs);
97 CPUTriCoreState *env = &cpu->env;
98 uint32_t psw;
99 int i;
100
101 psw = psw_read(env);
102
103 cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
104 cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
105 cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
106 cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
107 cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
108 cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);
109
110 for (i = 0; i < 16; ++i) {
111 if ((i & 3) == 0) {
112 cpu_fprintf(f, "\nGPR A%02d:", i);
113 }
114 cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
115 }
116 for (i = 0; i < 16; ++i) {
117 if ((i & 3) == 0) {
118 cpu_fprintf(f, "\nGPR D%02d:", i);
119 }
120 cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
121 }
122 cpu_fprintf(f, "\n");
123 }
124
125 /*
126 * Functions to generate micro-ops
127 */
128
/* Macros for generating helpers */

/* Call helper_<name>(env, arg) where arg is a compile-time constant. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
136
/* Call helper_<name> on sign-extended 16-bit halves of arg0/arg1.
   LL variant: the helper receives (arg0 hi, arg0 lo, arg1 lo, arg1 lo) --
   only the low half of arg1 is needed, so arg11 is passed twice. */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
149
/* LU variant: the helper receives (arg0 hi, arg0 lo, arg1 lo, arg1 hi);
   note arg10 holds the sign-extended LOW half of arg1 and arg11 the high. */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
165
/* UL variant: the helper receives (arg0 hi, arg0 lo, arg1 hi, arg1 lo).
   Same shape as GEN_HELPER_LU except arg1's halves are passed in the
   opposite order. */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
181
/* UU variant: the helper receives (arg0 lo, arg0 hi, arg1 hi, arg1 hi) --
   only the high half of arg1 is needed, so arg11 is passed twice. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
194
/* Call helper_<name> with a 64-bit first operand built from the register
   pair al1/ah1; the 64-bit result is split back into rl (low) / rh (high).
   Unlike GEN_HELPER_RR, cpu_env is not passed to the helper. */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
                                                             \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
206
/* Call helper_<name>(env, arg1, arg2) returning a 64-bit value, which is
   split into the register pair rl (low) / rh (high). */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {        \
    TCGv_i64 ret = tcg_temp_new_i64();                      \
                                                            \
    gen_helper_##name(ret, cpu_env, arg1, arg2);            \
    tcg_gen_extr_i64_i32(rl, rh, ret);                      \
                                                            \
    tcg_temp_free_i64(ret);                                 \
} while (0)
215
/* Effective address for ABS-format instructions: off18 bits [17:14] go to
   address bits [31:28], bits [13:0] stay in place. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* Branch target for absolute (B-format) jumps/calls.
   NOTE(review): the body expands the identifier `offset`, not the `con`
   parameter -- this only works because every caller names its variable
   `offset`. Worth renaming for macro hygiene. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
219
/* For two 32-bit registers used as a 64-bit register, the first
   register number needs to be even. Otherwise we trap. */
static inline void generate_trap(DisasContext *ctx, int class, int tin);
/* Emit an instruction-error trap (TIN2_OPD) when `reg` is odd; uses `ctx`
   from the enclosing scope. */
#define CHECK_REG_PAIR(reg) do {                      \
    if (reg & 0x1) {                                  \
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
    }                                                 \
} while (0)
228
229 /* Functions for load/save to/from memory */
230
231 static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
232 int16_t con, TCGMemOp mop)
233 {
234 TCGv temp = tcg_temp_new();
235 tcg_gen_addi_tl(temp, r2, con);
236 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
237 tcg_temp_free(temp);
238 }
239
240 static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
241 int16_t con, TCGMemOp mop)
242 {
243 TCGv temp = tcg_temp_new();
244 tcg_gen_addi_tl(temp, r2, con);
245 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
246 tcg_temp_free(temp);
247 }
248
249 static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
250 {
251 TCGv_i64 temp = tcg_temp_new_i64();
252
253 tcg_gen_concat_i32_i64(temp, rl, rh);
254 tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
255
256 tcg_temp_free_i64(temp);
257 }
258
259 static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
260 DisasContext *ctx)
261 {
262 TCGv temp = tcg_temp_new();
263 tcg_gen_addi_tl(temp, base, con);
264 gen_st_2regs_64(rh, rl, temp, ctx);
265 tcg_temp_free(temp);
266 }
267
268 static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
269 {
270 TCGv_i64 temp = tcg_temp_new_i64();
271
272 tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
273 /* write back to two 32 bit regs */
274 tcg_gen_extr_i64_i32(rl, rh, temp);
275
276 tcg_temp_free_i64(temp);
277 }
278
279 static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
280 DisasContext *ctx)
281 {
282 TCGv temp = tcg_temp_new();
283 tcg_gen_addi_tl(temp, base, con);
284 gen_ld_2regs_64(rh, rl, temp, ctx);
285 tcg_temp_free(temp);
286 }
287
288 static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
289 TCGMemOp mop)
290 {
291 TCGv temp = tcg_temp_new();
292 tcg_gen_addi_tl(temp, r2, off);
293 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
294 tcg_gen_mov_tl(r2, temp);
295 tcg_temp_free(temp);
296 }
297
298 static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
299 TCGMemOp mop)
300 {
301 TCGv temp = tcg_temp_new();
302 tcg_gen_addi_tl(temp, r2, off);
303 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
304 tcg_gen_mov_tl(r2, temp);
305 tcg_temp_free(temp);
306 }
307
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]);
   i.e. replace the memory bits selected by the mask in the high register
   of the pair with the corresponding bits of the low register. */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* ereg must name an even/odd 64-bit register pair */
    CHECK_REG_PAIR(ereg);
    /* temp = (M(EA, word) */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp = temp & ~E[a][63:32]) */
    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
    /* temp2 = (E[a][31:0] & E[a][63:32]); */
    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
    /* temp = temp | temp2; */
    tcg_gen_or_tl(temp, temp, temp2);
    /* M(EA, word) = temp; */
    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
329
330 /* tmp = M(EA, word);
331 M(EA, word) = D[a];
332 D[a] = tmp[31:0];*/
333 static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
334 {
335 TCGv temp = tcg_temp_new();
336
337 tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
338 tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
339 tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
340
341 tcg_temp_free(temp);
342 }
343
/* Compare-and-swap: load the word at ea; if it equals D[reg+1] (the
   compare value), store D[reg] (the swap value) back, otherwise store the
   loaded word unchanged. D[reg] always receives the old memory value.
   Note: the store happens unconditionally; only the stored value differs. */
static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp2 = (temp == D[reg+1]) ? D[reg] : temp */
    tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
                       cpu_gpr_d[reg], temp);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
357
/* Masked swap: in the word at ea, replace the bits selected by the mask
   D[reg+1] with the corresponding bits of D[reg]; D[reg] receives the old
   memory word. */
static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp2 = new bits from D[reg] under the mask */
    tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
    /* temp3 = old memory bits outside the mask */
    tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
    tcg_gen_or_tl(temp2, temp2, temp3);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
375
376
/* We generate loads and store to core special function register (csfr) through
   the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
   macros R, A and E, which allow read-only, all and endinit protected access.
   These macros also specify in which ISA version the csfr was introduced. */
#define R(ADDRESS, REG, FEATURE)                                         \
    case ADDRESS:                                                        \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
        }                                                                \
        break;
/* for reads, every access class is just a plain load */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
/* Move-from-core-register: ret = csfr at `offset`. Unknown offsets fall
   through the switch and leave ret untouched. */
static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
{
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, cpu_env);
    } else {
        switch (offset) {
#include "csfr.def"
        }
    }
}
#undef R
#undef A
#undef E
403
#define R(ADDRESS, REG, FEATURE)  /* don't gen writes to read-only reg,
                                     since no exception occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)               \
    case ADDRESS:                                                       \
        if (tricore_feature(env, FEATURE)) {                            \
            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
        }                                                               \
        break;
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
   watchdog device, we handle endinit protected registers like
   all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
/* Move-to-core-register: csfr at `offset` = r1. Only allowed in
   supervisor mode; a non-privileged write is currently a silent no-op
   (the privilege trap is still TODO, see below). */
static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
                            int32_t offset)
{
    if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(cpu_env, r1);
        } else {
            switch (offset) {
#include "csfr.def"
            }
        }
    } else {
        /* generate privilege trap */
    }
}
433
434 /* Functions for arithmetic instructions */
435
/* ret = r1 + r2, updating the cached PSW flags:
   V  = signed overflow of the addition (kept in bit 31),
   SV = sticky V, AV = "advance overflow" (result bit 31 ^ bit 30),
   SAV = sticky AV. The flag order below matters: ret may alias r1/r2,
   so the result is only moved into ret at the very end. */
static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    /* calc V bit: overflow iff operands agree in sign and result differs */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
459
/* 64-bit variant of gen_add_d: ret = r1 + r2 with V/SV/AV/SAV computed
   from the high word of the 64-bit result. */
static inline void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_add_i64(result, r1, r2);
    /* calc v bit */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_andc_i64(t1, t1, t0);
    /* V flag lives in bit 31 of the high word */
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
490
/* Packed-halfword add/sub on a 64-bit accumulator held as a register pair:
   ret_low = op1(r1_low, r2), ret_high = op2(r1_high, r3).
   op1/op2 are tcg_gen_add_tl or tcg_gen_sub_tl; the V computation is chosen
   per-op (andc for add, and for sub). V combines the overflow of both
   halves; AV combines the advance-overflow of both results. */
static inline void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv temp4 = tcg_temp_new();

    (*op1)(temp, r1_low, r2);
    /* calc V0 bit */
    tcg_gen_xor_tl(temp2, temp, r1_low);
    tcg_gen_xor_tl(temp3, r1_low, r2);
    if (op1 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(temp2, temp2, temp3);
    } else {
        tcg_gen_and_tl(temp2, temp2, temp3);
    }

    (*op2)(temp3, r1_high, r3);
    /* calc V1 bit */
    tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
    tcg_gen_xor_tl(temp4, r1_high, r3);
    if (op2 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    } else {
        tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    }
    /* combine V0/V1 bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
    /* calc sv bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* write result */
    tcg_gen_mov_tl(ret_low, temp);
    tcg_gen_mov_tl(ret_high, temp3);
    /* calc AV bit */
    tcg_gen_add_tl(temp, ret_low, ret_low);
    tcg_gen_xor_tl(temp, temp, ret_low);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free(temp4);
}
541
/* ret = r2 + (r1 * r3);
   computed in 64 bits so the V flag can be derived by range-checking the
   widened result against the signed 32-bit range. */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V
       t1 > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    /* move the 0/1 condition result into flag position (bit 31) */
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
577
578 static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
579 {
580 TCGv temp = tcg_const_i32(con);
581 gen_madd32_d(ret, r1, r2, temp);
582 tcg_temp_free(temp);
583 }
584
/* 64-bit multiply-accumulate: (ret_high:ret_low) = (r2_high:r2_low) +
   (r1 * r3), using muls2 + add2 so only the final add can overflow. */
static inline void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the add can overflow */
    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit: signed overflow of the high-word addition */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);
    tcg_temp_free(t4);
}
617
/* Unsigned 64-bit multiply-accumulate: (ret_high:ret_low) =
   (r2_high:r2_low) + zero_ext(r1) * zero_ext(r3). The unsigned add
   overflows exactly when the sum is smaller than one addend. */
static inline void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t2, t2, t1);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
    /* only the add overflows, if t2 < t1
       calc V bit */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
651
652 static inline void
653 gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
654 int32_t con)
655 {
656 TCGv temp = tcg_const_i32(con);
657 gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
658 tcg_temp_free(temp);
659 }
660
661 static inline void
662 gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
663 int32_t con)
664 {
665 TCGv temp = tcg_const_i32(con);
666 gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
667 tcg_temp_free(temp);
668 }
669
/* Packed halfword multiply-add: do the mul_h helper on the half-words of
   r2/r3 selected by `mode`, then add both 32-bit products to the
   accumulator pair r1_low/r1_high via gen_addsub64_h. */
static inline void
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp = low product, temp2 = high product */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_add_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
698
/* Packed halfword multiply-add/sub: like gen_madd_h, but the low product
   is SUBTRACTED from r1_low while the high product is added to r1_high. */
static inline void
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp = low product, temp2 = high product */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_add_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
727
/* Multi-precision variant: compute (high product - low product) << 16 as a
   64-bit value and add it to the 64-bit accumulator r1_high:r1_low with
   full flag computation (gen_add64_d). */
static inline void
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_add64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
765
/* forward declaration: saturating add, defined later in this file */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);

/* Saturating packed halfword multiply-add: each product is added to its
   accumulator half with gen_adds; the V/AV flags of the two saturating
   adds are OR-combined afterwards. */
static inline void
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    /* save flags of the first add before the second clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);

}
807
/* forward declaration: saturating subtract, defined later in this file */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);

/* Saturating packed halfword multiply-add/sub: low product is subtracted
   (gen_subs), high product added (gen_adds); V/AV flags of both saturating
   ops are OR-combined. */
static inline void
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    /* save flags of the subtract before the add clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);

}
849
/* Saturating multi-precision variant: (high product - low product) << 16
   is added to the 64-bit accumulator with the saturating add64_ssov
   helper. */
static inline void
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
885
886
/* Packed multiply-add using the mulm_h helper: its 64-bit result is added
   to the accumulator pair with full flag computation (gen_add64_d). */
static inline void
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_add64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
919
/* Saturating variant of gen_maddm_h: the 64-bit accumulate goes through
   the add64_ssov helper. */
static inline void
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
949
/* Rounded packed multiply-add: products are accumulated into r1_low/r1_high
   by the addr_h helper, which produces the packed 32-bit result in ret. */
static inline void
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
975
976 static inline void
977 gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
978 {
979 TCGv temp = tcg_temp_new();
980 TCGv temp2 = tcg_temp_new();
981
982 tcg_gen_andi_tl(temp2, r1, 0xffff0000);
983 tcg_gen_shli_tl(temp, r1, 16);
984 gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
985
986 tcg_temp_free(temp);
987 tcg_temp_free(temp2);
988 }
989
/* Rounded packed multiply-add/sub with 32-bit accumulator: r1's halves are
   split into two accumulators and combined with the products by the
   addsur_h helper. */
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp2 = high half in place, temp = low half shifted up */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
1018
1019
/* Saturating variant of gen_maddr64_h: accumulation and packing are done
   by the addr_h_ssov helper. */
static inline void
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
1045
1046 static inline void
1047 gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
1048 {
1049 TCGv temp = tcg_temp_new();
1050 TCGv temp2 = tcg_temp_new();
1051
1052 tcg_gen_andi_tl(temp2, r1, 0xffff0000);
1053 tcg_gen_shli_tl(temp, r1, 16);
1054 gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
1055
1056 tcg_temp_free(temp);
1057 tcg_temp_free(temp2);
1058 }
1059
/* Saturating variant of gen_maddsur32_h, using the addsur_h_ssov helper. */
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp2 = high half in place, temp = low half shifted up */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
1088
1089 static inline void
1090 gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
1091 {
1092 TCGv temp = tcg_const_i32(n);
1093 gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
1094 tcg_temp_free(temp);
1095 }
1096
1097 static inline void
1098 gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
1099 {
1100 TCGv temp = tcg_const_i32(n);
1101 gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
1102 tcg_temp_free(temp);
1103 }
1104
1105 static inline void
1106 gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
1107 uint32_t up_shift, CPUTriCoreState *env)
1108 {
1109 TCGv temp = tcg_temp_new();
1110 TCGv temp2 = tcg_temp_new();
1111 TCGv temp3 = tcg_temp_new();
1112 TCGv_i64 t1 = tcg_temp_new_i64();
1113 TCGv_i64 t2 = tcg_temp_new_i64();
1114 TCGv_i64 t3 = tcg_temp_new_i64();
1115
1116 tcg_gen_ext_i32_i64(t2, arg2);
1117 tcg_gen_ext_i32_i64(t3, arg3);
1118
1119 tcg_gen_mul_i64(t2, t2, t3);
1120 tcg_gen_shli_i64(t2, t2, n);
1121
1122 tcg_gen_ext_i32_i64(t1, arg1);
1123 tcg_gen_sari_i64(t2, t2, up_shift);
1124
1125 tcg_gen_add_i64(t3, t1, t2);
1126 tcg_gen_extrl_i64_i32(temp3, t3);
1127 /* calc v bit */
1128 tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
1129 tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
1130 tcg_gen_or_i64(t1, t1, t2);
1131 tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
1132 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
1133 /* We produce an overflow on the host if the mul before was
1134 (0x80000000 * 0x80000000) << 1). If this is the
1135 case, we negate the ovf. */
1136 if (n == 1) {
1137 tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
1138 tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
1139 tcg_gen_and_tl(temp, temp, temp2);
1140 tcg_gen_shli_tl(temp, temp, 31);
1141 /* negate v bit, if special condition */
1142 tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
1143 }
1144 /* Calc SV bit */
1145 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
1146 /* Calc AV/SAV bits */
1147 tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
1148 tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
1149 /* calc SAV */
1150 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1151 /* write back result */
1152 tcg_gen_mov_tl(ret, temp3);
1153
1154 tcg_temp_free(temp);
1155 tcg_temp_free(temp2);
1156 tcg_temp_free(temp3);
1157 tcg_temp_free_i64(t1);
1158 tcg_temp_free_i64(t2);
1159 tcg_temp_free_i64(t3);
1160 }
1161
1162 static inline void
1163 gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
1164 {
1165 TCGv temp = tcg_temp_new();
1166 TCGv temp2 = tcg_temp_new();
1167 if (n == 0) {
1168 tcg_gen_mul_tl(temp, arg2, arg3);
1169 } else { /* n is expected to be 1 */
1170 tcg_gen_mul_tl(temp, arg2, arg3);
1171 tcg_gen_shli_tl(temp, temp, 1);
1172 /* catch special case r1 = r2 = 0x8000 */
1173 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
1174 tcg_gen_sub_tl(temp, temp, temp2);
1175 }
1176 gen_add_d(ret, arg1, temp);
1177
1178 tcg_temp_free(temp);
1179 tcg_temp_free(temp2);
1180 }
1181
1182 static inline void
1183 gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
1184 {
1185 TCGv temp = tcg_temp_new();
1186 TCGv temp2 = tcg_temp_new();
1187 if (n == 0) {
1188 tcg_gen_mul_tl(temp, arg2, arg3);
1189 } else { /* n is expected to be 1 */
1190 tcg_gen_mul_tl(temp, arg2, arg3);
1191 tcg_gen_shli_tl(temp, temp, 1);
1192 /* catch special case r1 = r2 = 0x8000 */
1193 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
1194 tcg_gen_sub_tl(temp, temp, temp2);
1195 }
1196 gen_adds(ret, arg1, temp);
1197
1198 tcg_temp_free(temp);
1199 tcg_temp_free(temp2);
1200 }
1201
1202 static inline void
1203 gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
1204 TCGv arg3, uint32_t n)
1205 {
1206 TCGv temp = tcg_temp_new();
1207 TCGv temp2 = tcg_temp_new();
1208 TCGv_i64 t1 = tcg_temp_new_i64();
1209 TCGv_i64 t2 = tcg_temp_new_i64();
1210 TCGv_i64 t3 = tcg_temp_new_i64();
1211
1212 if (n == 0) {
1213 tcg_gen_mul_tl(temp, arg2, arg3);
1214 } else { /* n is expected to be 1 */
1215 tcg_gen_mul_tl(temp, arg2, arg3);
1216 tcg_gen_shli_tl(temp, temp, 1);
1217 /* catch special case r1 = r2 = 0x8000 */
1218 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
1219 tcg_gen_sub_tl(temp, temp, temp2);
1220 }
1221 tcg_gen_ext_i32_i64(t2, temp);
1222 tcg_gen_shli_i64(t2, t2, 16);
1223 tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
1224 gen_add64_d(t3, t1, t2);
1225 /* write back result */
1226 tcg_gen_extr_i64_i32(rl, rh, t3);
1227
1228 tcg_temp_free_i64(t1);
1229 tcg_temp_free_i64(t2);
1230 tcg_temp_free_i64(t3);
1231 tcg_temp_free(temp);
1232 tcg_temp_free(temp2);
1233 }
1234
1235 static inline void
1236 gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
1237 TCGv arg3, uint32_t n)
1238 {
1239 TCGv temp = tcg_temp_new();
1240 TCGv temp2 = tcg_temp_new();
1241 TCGv_i64 t1 = tcg_temp_new_i64();
1242 TCGv_i64 t2 = tcg_temp_new_i64();
1243
1244 if (n == 0) {
1245 tcg_gen_mul_tl(temp, arg2, arg3);
1246 } else { /* n is expected to be 1 */
1247 tcg_gen_mul_tl(temp, arg2, arg3);
1248 tcg_gen_shli_tl(temp, temp, 1);
1249 /* catch special case r1 = r2 = 0x8000 */
1250 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
1251 tcg_gen_sub_tl(temp, temp, temp2);
1252 }
1253 tcg_gen_ext_i32_i64(t2, temp);
1254 tcg_gen_shli_i64(t2, t2, 16);
1255 tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
1256
1257 gen_helper_add64_ssov(t1, cpu_env, t1, t2);
1258 tcg_gen_extr_i64_i32(rl, rh, t1);
1259
1260 tcg_temp_free(temp);
1261 tcg_temp_free(temp2);
1262 tcg_temp_free_i64(t1);
1263 tcg_temp_free_i64(t2);
1264 }
1265
/* MADD.Q 64-bit: rh:rl = arg1_high:arg1_low + ((arg2 * arg3) << n),
   computing PSW V/SV/AV/SAV.  n is the Q-format shift (0 or 1). */
static inline void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_add_i64(t4, t1, t2);
    /* calc v bit: signed-addition overflow iff the operands share a sign
       that the result does not ((res ^ a) & ~(a ^ b), bit 63) */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_andc_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
    }
    /* write back result (rh must be valid before the AV calc below) */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits from the high word of the result */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
1321
1322 static inline void
1323 gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
1324 uint32_t up_shift)
1325 {
1326 TCGv_i64 t1 = tcg_temp_new_i64();
1327 TCGv_i64 t2 = tcg_temp_new_i64();
1328 TCGv_i64 t3 = tcg_temp_new_i64();
1329
1330 tcg_gen_ext_i32_i64(t1, arg1);
1331 tcg_gen_ext_i32_i64(t2, arg2);
1332 tcg_gen_ext_i32_i64(t3, arg3);
1333
1334 tcg_gen_mul_i64(t2, t2, t3);
1335 tcg_gen_sari_i64(t2, t2, up_shift - n);
1336
1337 gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
1338
1339 tcg_temp_free_i64(t1);
1340 tcg_temp_free_i64(t2);
1341 tcg_temp_free_i64(t3);
1342 }
1343
1344 static inline void
1345 gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
1346 TCGv arg3, uint32_t n)
1347 {
1348 TCGv_i64 r1 = tcg_temp_new_i64();
1349 TCGv temp = tcg_const_i32(n);
1350
1351 tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
1352 gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
1353 tcg_gen_extr_i64_i32(rl, rh, r1);
1354
1355 tcg_temp_free_i64(r1);
1356 tcg_temp_free(temp);
1357 }
/* ret = r2 - (r1 * r3); computed in 64-bit arithmetic so the overflow
   check can look at the full result.  Sets PSW V/SV/AV/SAV. */
static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V:
       result > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* result < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);

    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
1394
1395 static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
1396 {
1397 TCGv temp = tcg_const_i32(con);
1398 gen_msub32_d(ret, r1, r2, temp);
1399 tcg_temp_free(temp);
1400 }
1401
/* 64-bit multiply-subtract: ret_high:ret_low = r2_high:r2_low - (r1 * r3),
   with full 32x32->64 signed multiply.  Sets PSW V/SV/AV/SAV. */
static inline void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the sub can overflow */
    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit: signed-subtraction overflow on the high words,
       (res_hi ^ a_hi) & (a_hi ^ b_hi), bit 31 */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);
    tcg_temp_free(t4);
}
1434
1435 static inline void
1436 gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
1437 int32_t con)
1438 {
1439 TCGv temp = tcg_const_i32(con);
1440 gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
1441 tcg_temp_free(temp);
1442 }
1443
/* Unsigned 64-bit multiply-subtract:
   ret_high:ret_low = r2_high:r2_low - (r1 * r3), unsigned 32x32->64.
   Sets PSW V/SV/AV/SAV. */
static inline void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t3, t2, t1);
    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
    /* calc V bit, only the sub can overflow, if t1 > t2 */
    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
1475
1476 static inline void
1477 gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
1478 int32_t con)
1479 {
1480 TCGv temp = tcg_const_i32(con);
1481 gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
1482 tcg_temp_free(temp);
1483 }
1484
1485 static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
1486 {
1487 TCGv temp = tcg_const_i32(r2);
1488 gen_add_d(ret, r1, temp);
1489 tcg_temp_free(temp);
1490 }
/* calculate the carry bit too */
/* ret = r1 + r2; sets PSW C (carry out of the 32-bit add) in addition to
   V/SV/AV/SAV. */
static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* Addition and set C/V/SV bits: add2 leaves the carry in cpu_PSW_C */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
    /* calc V bit: (res ^ r1) & ~(r1 ^ r2), bit 31 */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
1517
1518 static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
1519 {
1520 TCGv temp = tcg_const_i32(con);
1521 gen_add_CC(ret, r1, temp);
1522 tcg_temp_free(temp);
1523 }
1524
/* ret = r1 + r2 + carry-in (PSW C != 0); sets PSW C/V/SV/AV/SAV. */
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv carry = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* normalize the incoming carry to 0/1 */
    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
    /* Addition, carry and set C/V/SV bits: two add2 steps chain the
       carry through cpu_PSW_C */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
    /* calc V bit: (res ^ r1) & ~(r1 ^ r2), bit 31 */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
    tcg_temp_free(carry);
}
1554
1555 static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
1556 {
1557 TCGv temp = tcg_const_i32(con);
1558 gen_addc_CC(ret, r1, temp);
1559 tcg_temp_free(temp);
1560 }
1561
/* Conditional add: if (r4 cond 0) then r3 = r1 + r2 else r3 = r1.
   PSW V/AV are only replaced when the condition holds (movcond); the
   sticky SV/SAV bits are or-ed in under a mask so they only accumulate
   when the condition holds. */
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits: 0x80000000 if cond holds, else 0 */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    /* Calc PSW_V: (res ^ r1) & ~(r1 ^ r2) */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);

    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
1600
1601 static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
1602 TCGv r3, TCGv r4)
1603 {
1604 TCGv temp = tcg_const_i32(r2);
1605 gen_cond_add(cond, r1, temp, r3, r4);
1606 tcg_temp_free(temp);
1607 }
1608
/* ret = r1 - r2; sets PSW V/SV/AV/SAV (no carry). */
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit: (res ^ r1) & (r1 ^ r2), bit 31 */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
1632
/* 64-bit subtract: ret = r1 - r2, with PSW V/SV/AV/SAV computed from the
   64-bit result (AV from its high word). */
static inline void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_sub_i64(result, r1, r2);
    /* calc v bit: (res ^ r1) & (r1 ^ r2), bit 63 */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_and_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits from the high word of the result */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
1663
/* ret = r1 - r2; additionally sets PSW C (borrow convention: C = 1 when
   r1 >= r2 unsigned, i.e. no borrow), plus V/SV/AV/SAV. */
static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv result = tcg_temp_new();
    TCGv temp = tcg_temp_new();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc C bit */
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
    /* calc V bit: (res ^ r1) & (r1 ^ r2), bit 31 */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(temp);
}
1689
1690 static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
1691 {
1692 TCGv temp = tcg_temp_new();
1693 tcg_gen_not_tl(temp, r2);
1694 gen_addc_CC(ret, r1, temp);
1695 tcg_temp_free(temp);
1696 }
1697
/* Conditional subtract: if (r4 cond 0) then r3 = r1 - r2 else r3 = r1.
   Mirrors gen_cond_add: PSW V/AV replaced only when the condition holds,
   sticky SV/SAV or-ed in under a condition mask. */
static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits: 0x80000000 if cond holds, else 0 */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_sub_tl(result, r1, r2);
    /* Calc PSW_V: (res ^ r1) & (r1 ^ r2) */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);

    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
1736
/* MSUB.H: multiply the mode-selected half-words of r2/r3, then subtract
   the two packed products from the 64-bit accumulator r1_high:r1_low via
   gen_addsub64_h (both halves use tcg_gen_sub_tl). */
static inline void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* NOTE: temp (the constant n) is reused here for the low product */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
1765
/* MSUBS.H: like gen_msub_h but each 32-bit half is subtracted with
   saturation (gen_subs).  Because gen_subs overwrites PSW V/AV, the flags
   from the first subtraction are saved and or-ed back in afterwards. */
static inline void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* NOTE: temp (the constant n) is reused for the low product */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    /* save flags of the first subtraction before gen_subs clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
1804
1805 static inline void
1806 gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
1807 TCGv r3, uint32_t n, uint32_t mode)
1808 {
1809 TCGv temp = tcg_const_i32(n);
1810 TCGv_i64 temp64 = tcg_temp_new_i64();
1811 TCGv_i64 temp64_2 = tcg_temp_new_i64();
1812 TCGv_i64 temp64_3 = tcg_temp_new_i64();
1813 switch (mode) {
1814 case MODE_LL:
1815 GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
1816 break;
1817 case MODE_LU:
1818 GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
1819 break;
1820 case MODE_UL:
1821 GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
1822 break;
1823 case MODE_UU:
1824 GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
1825 break;
1826 }
1827 tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
1828 gen_sub64_d(temp64_3, temp64_2, temp64);
1829 /* write back result */
1830 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
1831
1832 tcg_temp_free(temp);
1833 tcg_temp_free_i64(temp64);
1834 tcg_temp_free_i64(temp64_2);
1835 tcg_temp_free_i64(temp64_3);
1836 }
1837
1838 static inline void
1839 gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
1840 TCGv r3, uint32_t n, uint32_t mode)
1841 {
1842 TCGv temp = tcg_const_i32(n);
1843 TCGv_i64 temp64 = tcg_temp_new_i64();
1844 TCGv_i64 temp64_2 = tcg_temp_new_i64();
1845 switch (mode) {
1846 case MODE_LL:
1847 GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
1848 break;
1849 case MODE_LU:
1850 GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
1851 break;
1852 case MODE_UL:
1853 GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
1854 break;
1855 case MODE_UU:
1856 GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
1857 break;
1858 }
1859 tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
1860 gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
1861 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
1862
1863 tcg_temp_free(temp);
1864 tcg_temp_free_i64(temp64);
1865 tcg_temp_free_i64(temp64_2);
1866 }
1867
1868 static inline void
1869 gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
1870 uint32_t mode)
1871 {
1872 TCGv temp = tcg_const_i32(n);
1873 TCGv_i64 temp64 = tcg_temp_new_i64();
1874 switch (mode) {
1875 case MODE_LL:
1876 GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
1877 break;
1878 case MODE_LU:
1879 GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
1880 break;
1881 case MODE_UL:
1882 GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
1883 break;
1884 case MODE_UU:
1885 GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
1886 break;
1887 }
1888 gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
1889
1890 tcg_temp_free(temp);
1891 tcg_temp_free_i64(temp64);
1892 }
1893
1894 static inline void
1895 gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
1896 {
1897 TCGv temp = tcg_temp_new();
1898 TCGv temp2 = tcg_temp_new();
1899
1900 tcg_gen_andi_tl(temp2, r1, 0xffff0000);
1901 tcg_gen_shli_tl(temp, r1, 16);
1902 gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
1903
1904 tcg_temp_free(temp);
1905 tcg_temp_free(temp2);
1906 }
1907
1908 static inline void
1909 gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
1910 uint32_t n, uint32_t mode)
1911 {
1912 TCGv temp = tcg_const_i32(n);
1913 TCGv_i64 temp64 = tcg_temp_new_i64();
1914 switch (mode) {
1915 case MODE_LL:
1916 GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
1917 break;
1918 case MODE_LU:
1919 GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
1920 break;
1921 case MODE_UL:
1922 GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
1923 break;
1924 case MODE_UU:
1925 GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
1926 break;
1927 }
1928 gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
1929
1930 tcg_temp_free(temp);
1931 tcg_temp_free_i64(temp64);
1932 }
1933
1934 static inline void
1935 gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
1936 {
1937 TCGv temp = tcg_temp_new();
1938 TCGv temp2 = tcg_temp_new();
1939
1940 tcg_gen_andi_tl(temp2, r1, 0xffff0000);
1941 tcg_gen_shli_tl(temp, r1, 16);
1942 gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
1943
1944 tcg_temp_free(temp);
1945 tcg_temp_free(temp2);
1946 }
1947
1948 static inline void
1949 gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
1950 {
1951 TCGv temp = tcg_const_i32(n);
1952 gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
1953 tcg_temp_free(temp);
1954 }
1955
1956 static inline void
1957 gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
1958 {
1959 TCGv temp = tcg_const_i32(n);
1960 gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
1961 tcg_temp_free(temp);
1962 }
1963
1964 static inline void
1965 gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
1966 uint32_t up_shift, CPUTriCoreState *env)
1967 {
1968 TCGv temp = tcg_temp_new();
1969 TCGv temp2 = tcg_temp_new();
1970 TCGv temp3 = tcg_temp_new();
1971 TCGv_i64 t1 = tcg_temp_new_i64();
1972 TCGv_i64 t2 = tcg_temp_new_i64();
1973 TCGv_i64 t3 = tcg_temp_new_i64();
1974 TCGv_i64 t4 = tcg_temp_new_i64();
1975
1976 tcg_gen_ext_i32_i64(t2, arg2);
1977 tcg_gen_ext_i32_i64(t3, arg3);
1978
1979 tcg_gen_mul_i64(t2, t2, t3);
1980
1981 tcg_gen_ext_i32_i64(t1, arg1);
1982 /* if we shift part of the fraction out, we need to round up */
1983 tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
1984 tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
1985 tcg_gen_sari_i64(t2, t2, up_shift - n);
1986 tcg_gen_add_i64(t2, t2, t4);
1987
1988 tcg_gen_sub_i64(t3, t1, t2);
1989 tcg_gen_extrl_i64_i32(temp3, t3);
1990 /* calc v bit */
1991 tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
1992 tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
1993 tcg_gen_or_i64(t1, t1, t2);
1994 tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
1995 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
1996 /* Calc SV bit */
1997 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
1998 /* Calc AV/SAV bits */
1999 tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
2000 tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
2001 /* calc SAV */
2002 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
2003 /* write back result */
2004 tcg_gen_mov_tl(ret, temp3);
2005
2006 tcg_temp_free(temp);
2007 tcg_temp_free(temp2);
2008 tcg_temp_free(temp3);
2009 tcg_temp_free_i64(t1);
2010 tcg_temp_free_i64(t2);
2011 tcg_temp_free_i64(t3);
2012 tcg_temp_free_i64(t4);
2013 }
2014
2015 static inline void
2016 gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
2017 {
2018 TCGv temp = tcg_temp_new();
2019 TCGv temp2 = tcg_temp_new();
2020 if (n == 0) {
2021 tcg_gen_mul_tl(temp, arg2, arg3);
2022 } else { /* n is expected to be 1 */
2023 tcg_gen_mul_tl(temp, arg2, arg3);
2024 tcg_gen_shli_tl(temp, temp, 1);
2025 /* catch special case r1 = r2 = 0x8000 */
2026 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
2027 tcg_gen_sub_tl(temp, temp, temp2);
2028 }
2029 gen_sub_d(ret, arg1, temp);
2030
2031 tcg_temp_free(temp);
2032 tcg_temp_free(temp2);
2033 }
2034
2035 static inline void
2036 gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
2037 {
2038 TCGv temp = tcg_temp_new();
2039 TCGv temp2 = tcg_temp_new();
2040 if (n == 0) {
2041 tcg_gen_mul_tl(temp, arg2, arg3);
2042 } else { /* n is expected to be 1 */
2043 tcg_gen_mul_tl(temp, arg2, arg3);
2044 tcg_gen_shli_tl(temp, temp, 1);
2045 /* catch special case r1 = r2 = 0x8000 */
2046 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
2047 tcg_gen_sub_tl(temp, temp, temp2);
2048 }
2049 gen_subs(ret, arg1, temp);
2050
2051 tcg_temp_free(temp);
2052 tcg_temp_free(temp2);
2053 }
2054
2055 static inline void
2056 gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
2057 TCGv arg3, uint32_t n)
2058 {
2059 TCGv temp = tcg_temp_new();
2060 TCGv temp2 = tcg_temp_new();
2061 TCGv_i64 t1 = tcg_temp_new_i64();
2062 TCGv_i64 t2 = tcg_temp_new_i64();
2063 TCGv_i64 t3 = tcg_temp_new_i64();
2064
2065 if (n == 0) {
2066 tcg_gen_mul_tl(temp, arg2, arg3);
2067 } else { /* n is expected to be 1 */
2068 tcg_gen_mul_tl(temp, arg2, arg3);
2069 tcg_gen_shli_tl(temp, temp, 1);
2070 /* catch special case r1 = r2 = 0x8000 */
2071 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
2072 tcg_gen_sub_tl(temp, temp, temp2);
2073 }
2074 tcg_gen_ext_i32_i64(t2, temp);
2075 tcg_gen_shli_i64(t2, t2, 16);
2076 tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
2077 gen_sub64_d(t3, t1, t2);
2078 /* write back result */
2079 tcg_gen_extr_i64_i32(rl, rh, t3);
2080
2081 tcg_temp_free_i64(t1);
2082 tcg_temp_free_i64(t2);
2083 tcg_temp_free_i64(t3);
2084 tcg_temp_free(temp);
2085 tcg_temp_free(temp2);
2086 }
2087
/* Saturating variant of gen_m16sub64_q: the scaled 16x16 product is
 * subtracted from the 64-bit accumulator by the sub64_ssov helper,
 * which saturates the result and updates the PSW flags. */
static inline void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    /* scale product by 2^16 before the 64-bit accumulation */
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
2118
/* MSUB.Q 32x32->64: subtract the (optionally Q-format shifted)
 * product arg2 * arg3 from the 64-bit accumulator arg1_high:arg1_low
 * and compute PSW V/SV/AV/SAV.
 * NOTE(review): the env parameter is not used in this body; kept for
 * signature parity with callers — confirm before removing. */
static inline void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_sub_i64(t4, t1, t2);
    /* calc v bit: signed-overflow pattern on the 64-bit subtraction,
       taken from the high words */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_and_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits: advance-overflow from bit 31 vs bit 30 of rh */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
2174
/* MSUBS.Q 32-bit: compute the 64-bit product arg2 * arg3, shift it
 * down into position (up_shift - n), rounding up if any fraction bits
 * are shifted out, then do a saturating subtraction from arg1 via the
 * msub32_q_sub_ssov helper. */
static inline void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t3, t2, up_shift - n);
    tcg_gen_add_i64(t3, t3, t4);

    gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
2202
2203 static inline void
2204 gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
2205 TCGv arg3, uint32_t n)
2206 {
2207 TCGv_i64 r1 = tcg_temp_new_i64();
2208 TCGv temp = tcg_const_i32(n);
2209
2210 tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
2211 gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
2212 tcg_gen_extr_i64_i32(rl, rh, r1);
2213
2214 tcg_temp_free_i64(r1);
2215 tcg_temp_free(temp);
2216 }
2217
/* MSUBAD.H: multiply the two halfword pairs of r2/r3 selected by mode
 * (LL/LU/UL/UU), then add one product to r1_low and subtract the other
 * from r1_high via gen_addsub64_h.  temp is reused: it first carries n
 * into the mul_h helper, then receives the low product word. */
static inline void
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp = low product word, temp2 = high product word */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
2246
/* MSUBADM.H: multi-precision multiply-sub/add.  Form the two halfword
 * products (selected by mode), compute (high - low) << 16 and subtract
 * that from the 64-bit accumulator r1_high:r1_low with gen_sub64_d. */
static inline void
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_sub64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
2284
/* MSUBADR.H: halfword multiply-sub/add with rounding.  The two
 * products go to the subadr_h helper together with r1 split into its
 * upper halfword (temp2) and lower halfword shifted up (temp). */
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
2313
/* MSUBADS.H: saturating halfword multiply-add (low word) and
 * multiply-sub (high word).  The V and AV flags of the first
 * saturating op are saved and OR-ed back in after the second, so the
 * PSW reflects both operations. */
static inline void
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    /* temp = low product word, temp2 = high product word */
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    /* save flags of the add before the sub overwrites them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
2352
/* MSUBADMS.H: multi-precision saturating variant.  Compute
 * (high product - low product) << 16 and subtract it from the 64-bit
 * accumulator via the saturating sub64_ssov helper. */
static inline void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
2388
/* MSUBADRS.H: like gen_msubadr32_h but saturating — delegates to the
 * subadr_h_ssov helper instead of subadr_h. */
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
2417
/* ABS: ret = |r1|, with PSW V/SV/AV/SAV updated.  Overflow is only
 * possible for r1 == INT32_MIN, which has no positive counterpart. */
static inline void gen_abs(TCGv ret, TCGv r1)
{
    TCGv temp = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* ret = (r1 >= 0) ? r1 : -r1 */
    tcg_gen_neg_tl(temp, r1);
    tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(t0);
}
2439
/* ABSDIF: ret = |r1 - r2|, computed as whichever of (r1 - r2) and
 * (r2 - r1) corresponds to the larger minuend.  V is the signed
 * overflow of the subtraction that was actually selected. */
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);

    /* calc V bit: overflow of the selected subtraction */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
2468
2469 static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
2470 {
2471 TCGv temp = tcg_const_i32(con);
2472 gen_absdif(ret, r1, temp);
2473 tcg_temp_free(temp);
2474 }
2475
2476 static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
2477 {
2478 TCGv temp = tcg_const_i32(con);
2479 gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
2480 tcg_temp_free(temp);
2481 }
2482
/* MUL (32-bit result): ret = r1 * r2.  V is set when the full 64-bit
 * product does not fit in 32 bits, i.e. the high word differs from the
 * sign extension of the low word. */
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    /* calc V bit: high word must equal sign of low word */
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(high);
    tcg_temp_free(low);
}
2505
2506 static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
2507 {
2508 TCGv temp = tcg_const_i32(con);
2509 gen_mul_i32s(ret, r1, temp);
2510 tcg_temp_free(temp);
2511 }
2512
/* MUL (64-bit result, signed): the product always fits, so V is
 * cleared; the SV update is then a no-op kept for pattern symmetry
 * with the other arithmetic helpers. */
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2526
2527 static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
2528 int32_t con)
2529 {
2530 TCGv temp = tcg_const_i32(con);
2531 gen_mul_i64s(ret_low, ret_high, r1, temp);
2532 tcg_temp_free(temp);
2533 }
2534
/* MUL.U (64-bit result, unsigned): the product always fits, so V is
 * cleared; SV update is a no-op kept for symmetry. */
static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2548
2549 static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
2550 int32_t con)
2551 {
2552 TCGv temp = tcg_const_i32(con);
2553 gen_mul_i64u(ret_low, ret_high, r1, temp);
2554 tcg_temp_free(temp);
2555 }
2556
2557 static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
2558 {
2559 TCGv temp = tcg_const_i32(con);
2560 gen_helper_mul_ssov(ret, cpu_env, r1, temp);
2561 tcg_temp_free(temp);
2562 }
2563
2564 static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
2565 {
2566 TCGv temp = tcg_const_i32(con);
2567 gen_helper_mul_suov(ret, cpu_env, r1, temp);
2568 tcg_temp_free(temp);
2569 }
2570 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2571 static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
2572 {
2573 TCGv temp = tcg_const_i32(con);
2574 gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
2575 tcg_temp_free(temp);
2576 }
2577
2578 static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
2579 {
2580 TCGv temp = tcg_const_i32(con);
2581 gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
2582 tcg_temp_free(temp);
2583 }
2584
2585 static void
2586 gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
2587 {
2588 TCGv temp = tcg_temp_new();
2589 TCGv_i64 temp_64 = tcg_temp_new_i64();
2590 TCGv_i64 temp2_64 = tcg_temp_new_i64();
2591
2592 if (n == 0) {
2593 if (up_shift == 32) {
2594 tcg_gen_muls2_tl(rh, rl, arg1, arg2);
2595 } else if (up_shift == 16) {
2596 tcg_gen_ext_i32_i64(temp_64, arg1);
2597 tcg_gen_ext_i32_i64(temp2_64, arg2);
2598
2599 tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
2600 tcg_gen_shri_i64(temp_64, temp_64, up_shift);
2601 tcg_gen_extr_i64_i32(rl, rh, temp_64);
2602 } else {
2603 tcg_gen_muls2_tl(rl, rh, arg1, arg2);
2604 }
2605 /* reset v bit */
2606 tcg_gen_movi_tl(cpu_PSW_V, 0);
2607 } else { /* n is expected to be 1 */
2608 tcg_gen_ext_i32_i64(temp_64, arg1);
2609 tcg_gen_ext_i32_i64(temp2_64, arg2);
2610
2611 tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
2612
2613 if (up_shift == 0) {
2614 tcg_gen_shli_i64(temp_64, temp_64, 1);
2615 } else {
2616 tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
2617 }
2618 tcg_gen_extr_i64_i32(rl, rh, temp_64);
2619 /* overflow only occurs if r1 = r2 = 0x8000 */
2620 if (up_shift == 0) {/* result is 64 bit */
2621 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
2622 0x80000000);
2623 } else { /* result is 32 bit */
2624 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
2625 0x80000000);
2626 }
2627 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
2628 /* calc sv overflow bit */
2629 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
2630 }
2631 /* calc av overflow bit */
2632 if (up_shift == 0) {
2633 tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
2634 tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
2635 } else {
2636 tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
2637 tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
2638 }
2639 /* calc sav overflow bit */
2640 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
2641 tcg_temp_free(temp);
2642 tcg_temp_free_i64(temp_64);
2643 tcg_temp_free_i64(temp2_64);
2644 }
2645
/* MUL.Q 16x16->32: plain product (n == 0) or product << 1 (n == 1)
 * with the 0x8000 * 0x8000 case reduced by one so it saturates to
 * 0x7fffffff.  V is always cleared; AV/SAV follow the result. */
static void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
}
2669
/* MULR.Q: 16x16 multiply with rounding — add 0x8000 and keep only the
 * upper halfword of the result. */
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_addi_tl(ret, ret, 0x8000);
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        tcg_gen_addi_tl(ret, ret, 0x8000);
        /* catch special case r1 = r2 = 0x8000: the rounded value is
           0x80008000; subtracting 0x8001 yields 0x7fffffff, so the
           final upper halfword saturates to 0x7fff */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);

    tcg_temp_free(temp);
}
2697
2698 static inline void
2699 gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2700 TCGv r3)
2701 {
2702 TCGv_i64 temp64 = tcg_temp_new_i64();
2703 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
2704 gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
2705 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
2706 tcg_temp_free_i64(temp64);
2707 }
2708
2709 static inline void
2710 gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2711 int32_t con)
2712 {
2713 TCGv temp = tcg_const_i32(con);
2714 gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
2715 tcg_temp_free(temp);
2716 }
2717
2718 static inline void
2719 gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2720 TCGv r3)
2721 {
2722 TCGv_i64 temp64 = tcg_temp_new_i64();
2723 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
2724 gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
2725 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
2726 tcg_temp_free_i64(temp64);
2727 }
2728
2729 static inline void
2730 gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2731 int32_t con)
2732 {
2733 TCGv temp = tcg_const_i32(con);
2734 gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
2735 tcg_temp_free(temp);
2736 }
2737
2738 static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
2739 {
2740 TCGv temp = tcg_const_i32(con);
2741 gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
2742 tcg_temp_free(temp);
2743 }
2744
2745 static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
2746 {
2747 TCGv temp = tcg_const_i32(con);
2748 gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
2749 tcg_temp_free(temp);
2750 }
2751
2752 static inline void
2753 gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2754 TCGv r3)
2755 {
2756 TCGv_i64 temp64 = tcg_temp_new_i64();
2757 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
2758 gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
2759 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
2760 tcg_temp_free_i64(temp64);
2761 }
2762
2763 static inline void
2764 gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2765 int32_t con)
2766 {
2767 TCGv temp = tcg_const_i32(con);
2768 gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
2769 tcg_temp_free(temp);
2770 }
2771
2772 static inline void
2773 gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2774 TCGv r3)
2775 {
2776 TCGv_i64 temp64 = tcg_temp_new_i64();
2777 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
2778 gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
2779 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
2780 tcg_temp_free_i64(temp64);
2781 }
2782
2783 static inline void
2784 gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
2785 int32_t con)
2786 {
2787 TCGv temp = tcg_const_i32(con);
2788 gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
2789 tcg_temp_free(temp);
2790 }
2791
2792 static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
2793 {
2794 TCGv sat_neg = tcg_const_i32(low);
2795 TCGv temp = tcg_const_i32(up);
2796
2797 /* sat_neg = (arg < low ) ? low : arg; */
2798 tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
2799
2800 /* ret = (sat_neg > up ) ? up : sat_neg; */
2801 tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
2802
2803 tcg_temp_free(sat_neg);
2804 tcg_temp_free(temp);
2805 }
2806
2807 static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
2808 {
2809 TCGv temp = tcg_const_i32(up);
2810 /* sat_neg = (arg > up ) ? up : arg; */
2811 tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
2812 tcg_temp_free(temp);
2813 }
2814
/* SH immediate: positive counts shift left, negative counts shift
 * right logically.  -32 is special-cased to 0 because a host shift by
 * 32 on a 32-bit value is undefined.
 * NOTE(review): assumes -32 <= shift_count <= 31 from the decoded
 * field — confirm against callers. */
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}
2825
/* SH.H: shift both halfwords of r1 independently by shiftcount (via
 * gen_shi) and merge them back; -16 clears the whole result. */
static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv temp_low, temp_high;

    if (shiftcount == -16) {
        tcg_gen_movi_tl(ret, 0);
    } else {
        temp_high = tcg_temp_new();
        temp_low = tcg_temp_new();

        /* isolate the lanes so bits cannot cross the halfword border */
        tcg_gen_andi_tl(temp_low, r1, 0xffff);
        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
        gen_shi(temp_low, temp_low, shiftcount);
        gen_shi(ret, temp_high, shiftcount);
        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);

        tcg_temp_free(temp_low);
        tcg_temp_free(temp_high);
    }
}
2846
2847 static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
2848 {
2849 uint32_t msk, msk_start;
2850 TCGv temp = tcg_temp_new();
2851 TCGv temp2 = tcg_temp_new();
2852 TCGv t_0 = tcg_const_i32(0);
2853
2854 if (shift_count == 0) {
2855 /* Clear PSW.C and PSW.V */
2856 tcg_gen_movi_tl(cpu_PSW_C, 0);
2857 tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
2858 tcg_gen_mov_tl(ret, r1);
2859 } else if (shift_count == -32) {
2860 /* set PSW.C */
2861 tcg_gen_mov_tl(cpu_PSW_C, r1);
2862 /* fill ret completely with sign bit */
2863 tcg_gen_sari_tl(ret, r1, 31);
2864 /* clear PSW.V */
2865 tcg_gen_movi_tl(cpu_PSW_V, 0);
2866 } else if (shift_count > 0) {
2867 TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
2868 TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
2869
2870 /* calc carry */
2871 msk_start = 32 - shift_count;
2872 msk = ((1 << shift_count) - 1) << msk_start;
2873 tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
2874 /* calc v/sv bits */
2875 tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
2876 tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
2877 tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
2878 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
2879 /* calc sv */
2880 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
2881 /* do shift */
2882 tcg_gen_shli_tl(ret, r1, shift_count);
2883
2884 tcg_temp_free(t_max);
2885 tcg_temp_free(t_min);
2886 } else {
2887 /* clear PSW.V */
2888 tcg_gen_movi_tl(cpu_PSW_V, 0);
2889 /* calc carry */
2890 msk = (1 << -shift_count) - 1;
2891 tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
2892 /* do shift */
2893 tcg_gen_sari_tl(ret, r1, -shift_count);
2894 }
2895 /* calc av overflow bit */
2896 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
2897 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
2898 /* calc sav overflow bit */
2899 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
2900
2901 tcg_temp_free(temp);
2902 tcg_temp_free(temp2);
2903 tcg_temp_free(t_0);
2904 }
2905
/* SHAS: saturating arithmetic shift, delegated to the sha_ssov helper. */
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sha_ssov(ret, cpu_env, r1, r2);
}
2910
2911 static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
2912 {
2913 TCGv temp = tcg_const_i32(con);
2914 gen_shas(ret, r1, temp);
2915 tcg_temp_free(temp);
2916 }
2917
2918 static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
2919 {
2920 TCGv low, high;
2921
2922 if (shift_count == 0) {
2923 tcg_gen_mov_tl(ret, r1);
2924 } else if (shift_count > 0) {
2925 low = tcg_temp_new();
2926 high = tcg_temp_new();
2927
2928 tcg_gen_andi_tl(high, r1, 0xffff0000);
2929 tcg_gen_shli_tl(low, r1, shift_count);
2930 tcg_gen_shli_tl(ret, high, shift_count);
2931 tcg_gen_deposit_tl(ret, ret, low, 0, 16);
2932
2933 tcg_temp_free(low);
2934 tcg_temp_free(high);
2935 } else {
2936 low = tcg_temp_new();
2937 high = tcg_temp_new();
2938
2939 tcg_gen_ext16s_tl(low, r1);
2940 tcg_gen_sari_tl(low, low, -shift_count);
2941 tcg_gen_sari_tl(ret, r1, -shift_count);
2942 tcg_gen_deposit_tl(ret, ret, low, 0, 16);
2943
2944 tcg_temp_free(low);
2945 tcg_temp_free(high);
2946 }
2947
2948 }
2949
2950 /* ret = {ret[30:0], (r1 cond r2)}; */
2951 static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
2952 {
2953 TCGv temp = tcg_temp_new();
2954 TCGv temp2 = tcg_temp_new();
2955
2956 tcg_gen_shli_tl(temp, ret, 1);
2957 tcg_gen_setcond_tl(cond, temp2, r1, r2);
2958 tcg_gen_or_tl(ret, temp, temp2);
2959
2960 tcg_temp_free(temp);
2961 tcg_temp_free(temp2);
2962 }
2963
2964 static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
2965 {
2966 TCGv temp = tcg_const_i32(con);
2967 gen_sh_cond(cond, ret, r1, temp);
2968 tcg_temp_free(temp);
2969 }
2970
/* ADDS: saturating add, delegated to the add_ssov helper. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}
2975
2976 static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
2977 {
2978 TCGv temp = tcg_const_i32(con);
2979 gen_helper_add_ssov(ret, cpu_env, r1, temp);
2980 tcg_temp_free(temp);
2981 }
2982
2983 static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
2984 {
2985 TCGv temp = tcg_const_i32(con);
2986 gen_helper_add_suov(ret, cpu_env, r1, temp);
2987 tcg_temp_free(temp);
2988 }
2989
/* SUBS: saturating subtract, delegated to the sub_ssov helper. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}
2994
/* SUBS.U: saturating unsigned subtract, via the sub_suov helper. */
static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_suov(ret, cpu_env, r1, r2);
}
2999
/* Two-stage bit operation:
   ret[0] = ret op2 (r1[pos1] op1 r2[pos2]); ret[31:1] unchanged.
   Only bit 0 of the intermediate values matters — the final deposit
   keeps a single bit. */
static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv),
                               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    /* move the selected bits down to position 0 */
    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(temp1, temp1, temp2);
    (*op2)(temp1 , ret, temp1);

    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}
3021
/* ret = r1[pos1] op1 r2[pos2]; — the whole of ret is replaced by the
   single-bit result (masked to bit 0). */
static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    /* move the selected bits down to position 0 */
    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(ret, temp1, temp2);

    tcg_gen_andi_tl(ret, ret, 0x1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}
3042
/* Accumulating compare: combine (r1 cond r2) with ret[0] using op and
   put the result back into bit 0 of ret, leaving ret[31:1] intact. */
static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
                                         void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* temp = (arg1 cond arg2 )*/
    tcg_gen_setcond_tl(cond, temp, r1, r2);
    /* temp2 = ret[0]*/
    tcg_gen_andi_tl(temp2, ret, 0x1);
    /* temp = temp insn temp2 */
    (*op)(temp, temp, temp2);
    /* ret = {ret[31:1], temp} */
    tcg_gen_deposit_tl(ret, ret, temp, 0, 1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
3060
3061 static inline void
3062 gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
3063 void(*op)(TCGv, TCGv, TCGv))
3064 {
3065 TCGv temp = tcg_const_i32(con);
3066 gen_accumulating_cond(cond, ret, r1, temp, op);
3067 tcg_temp_free(temp);
3068 }
3069
/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
{
    /* setcond yields 0/1; negating 1 gives all-ones */
    tcg_gen_setcond_tl(cond, ret, r1, r2);
    tcg_gen_neg_tl(ret, ret);
}
3076
3077 static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
3078 {
3079 TCGv b0 = tcg_temp_new();
3080 TCGv b1 = tcg_temp_new();
3081 TCGv b2 = tcg_temp_new();
3082 TCGv b3 = tcg_temp_new();
3083
3084 /* byte 0 */
3085 tcg_gen_andi_tl(b0, r1, 0xff);
3086 tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
3087
3088 /* byte 1 */
3089 tcg_gen_andi_tl(b1, r1, 0xff00);
3090 tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
3091
3092 /* byte 2 */
3093 tcg_gen_andi_tl(b2, r1, 0xff0000);
3094 tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
3095
3096 /* byte 3 */
3097 tcg_gen_andi_tl(b3, r1, 0xff000000);
3098 tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
3099
3100 /* combine them */
3101 tcg_gen_or_tl(ret, b0, b1);
3102 tcg_gen_or_tl(ret, ret, b2);
3103 tcg_gen_or_tl(ret, ret, b3);
3104
3105 tcg_temp_free(b0);
3106 tcg_temp_free(b1);
3107 tcg_temp_free(b2);
3108 tcg_temp_free(b3);
3109 }
3110
3111 static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
3112 {
3113 TCGv h0 = tcg_temp_new();
3114 TCGv h1 = tcg_temp_new();
3115
3116 /* halfword 0 */
3117 tcg_gen_andi_tl(h0, r1, 0xffff);
3118 tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
3119
3120 /* halfword 1 */
3121 tcg_gen_andi_tl(h1, r1, 0xffff0000);
3122 tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
3123
3124 /* combine them */
3125 tcg_gen_or_tl(ret, h0, h1);
3126
3127 tcg_temp_free(h0);
3128 tcg_temp_free(h1);
3129 }
/* mask = ((1 << width) - 1) << pos;
   ret = (r1 & ~mask) | ((r2 << pos) & mask); */
/* INSERT: copy a width-bit field of r2 into r1 at bit position pos;
   both width and pos are runtime values, so the mask is built with
   shifts at translation time. */
static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
{
    TCGv mask = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* mask = ((1 << width) - 1) << pos */
    tcg_gen_movi_tl(mask, 1);
    tcg_gen_shl_tl(mask, mask, width);
    tcg_gen_subi_tl(mask, mask, 1);
    tcg_gen_shl_tl(mask, mask, pos);

    /* ret = (r1 & ~mask) | ((r2 << pos) & mask) */
    tcg_gen_shl_tl(temp, r2, pos);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_andc_tl(temp2, r1, mask);
    tcg_gen_or_tl(ret, temp, temp2);

    tcg_temp_free(mask);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
3152
3153 static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
3154 {
3155 TCGv_i64 temp = tcg_temp_new_i64();
3156
3157 gen_helper_bsplit(temp, r1);
3158 tcg_gen_extr_i64_i32(rl, rh, temp);
3159
3160 tcg_temp_free_i64(temp);
3161 }
3162
3163 static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
3164 {
3165 TCGv_i64 temp = tcg_temp_new_i64();
3166
3167 gen_helper_unpack(temp, r1);
3168 tcg_gen_extr_i64_i32(rl, rh, temp);
3169
3170 tcg_temp_free_i64(temp);
3171 }
3172
3173 static inline void
3174 gen_dvinit_b(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
3175 {
3176 TCGv_i64 ret = tcg_temp_new_i64();
3177
3178 if (!tricore_feature(env, TRICORE_FEATURE_131)) {
3179 gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
3180 } else {
3181 gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
3182 }
3183 tcg_gen_extr_i64_i32(rl, rh, ret);