Fix new typos (found by codespell)
[qemu.git] / tcg / mips / tcg-target.c
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
27 #include "tcg-be-ldst.h"
28
29 #ifdef HOST_WORDS_BIGENDIAN
30 # define MIPS_BE 1
31 #else
32 # define MIPS_BE 0
33 #endif
34
35 #define LO_OFF (MIPS_BE * 4)
36 #define HI_OFF (4 - LO_OFF)
37
#ifndef NDEBUG
/* ABI names of the 32 MIPS integer registers, indexed by register
   number.  Used only for debug output. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "at",
    "v0",
    "v1",
    "a0",
    "a1",
    "a2",
    "a3",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "t8",
    "t9",
    "k0",
    "k1",
    "gp",
    "sp",
    "s8",
    "ra",
};
#endif
74
75 #define TCG_TMP0 TCG_REG_AT
76 #define TCG_TMP1 TCG_REG_T9
77
/* check if we really need so many registers :P */
/* Register allocation preference order: callee-saved registers first
   (they survive calls), then call-clobbered temporaries, and the
   argument registers last in reverse order so they remain free for
   marshalling outgoing call arguments. */
static const TCGReg tcg_target_reg_alloc_order[] = {
    /* Call saved registers. */
    TCG_REG_S0,
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,

    /* Call clobbered registers. */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,
    TCG_REG_T9,
    TCG_REG_V1,
    TCG_REG_V0,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};
111
/* Registers used for the first four word-sized outgoing call
   arguments, in order (A0..A3); further arguments go on the stack
   (see tcg_out_call_iarg_reg). */
static const TCGReg tcg_target_call_iarg_regs[4] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3
};
118
/* Registers in which call results are returned: V0, plus V1 for the
   second word of a 64-bit result (see tcg_out_qemu_ld_slow_path). */
static const TCGReg tcg_target_call_oarg_regs[2] = {
    TCG_REG_V0,
    TCG_REG_V1
};
123
/* Address used to return from a translation block; presumably filled
   in where the prologue/epilogue is generated (not in this chunk) —
   TODO confirm against tcg_target_qemu_prologue. */
static tcg_insn_unit *tb_ret_addr;
125
/* Compute the 16-bit displacement field for a branch at PC reaching
   TARGET.  The displacement is counted in instruction words from the
   delay slot (pc + 1); the pointer subtraction performs the implied
   scaling.  Aborts if TARGET is out of branch range. */
static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    /* Let the compiler perform the right-shift as part of the arithmetic. */
    ptrdiff_t disp = target - (pc + 1);
    assert(disp == (int16_t)disp);
    return disp & 0xffff;
}
133
/* Patch the 16-bit displacement field of the branch insn at PC so
   that it reaches TARGET. */
static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
}
138
/* Compute the 26-bit target field of a J/JAL insn: the word address
   of TARGET within the current 256MB pc-region.  Aborts if PC and
   TARGET are not within the same region. */
static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
    return ((uintptr_t)target >> 2) & 0x3ffffff;
}
144
/* Patch the 26-bit target field of the jump insn at PC so that it
   reaches TARGET. */
static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
}
149
/* Apply a relocation to an emitted instruction.  This backend only
   uses 16-bit pc-relative branch relocations with a zero addend. */
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    assert(type == R_MIPS_PC16);
    assert(addend == 0);
    reloc_pc16(code_ptr, (tcg_insn_unit *)value);
}
157
158 #define TCG_CT_CONST_ZERO 0x100
159 #define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */
160 #define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */
161 #define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */
162 #define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */
163
164 static inline bool is_p2m1(tcg_target_long val)
165 {
166 return val && ((val + 1) & val) == 0;
167 }
168
/* parse target specific constraints */
/* Consume one constraint letter from *PCT_STR into CT, advancing the
   string.  Returns 0 on success, -1 for an unknown letter. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        break;
    case 'L': /* qemu_ld output arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        /* V0 is excluded: the slow path returns the loaded data in it
           (see tcg_out_qemu_ld_slow_path). */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
        break;
    case 'l': /* qemu_ld input arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        /* A0 (and A2 for 64-bit guest addresses) are clobbered by the
           tlb load (see tcg_out_tlb_load). */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 64) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        }
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 32) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
        } else {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
        }
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_P2M1;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N16;
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           ZERO is also the register number 0. Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
233
/* test if a constant matches the constraint */
/* Returns non-zero if VAL satisfies the constant constraints in
   ARG_CT.  The P2M1 case additionally requires mips32r2, since it is
   only used to feed the EXT instruction. */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
        return 1;
    } else if ((ct & TCG_CT_CONST_P2M1)
               && use_mips32r2_instructions && is_p2m1(val)) {
        return 1;
    }
    return 0;
}
256
/* instruction opcodes */
/* Each value is a full 32-bit instruction template: the major opcode
   occupies bits 31:26, and for the SPECIAL/REGIMM/SPECIAL2/SPECIAL3
   groups the function / sub-opcode bits are folded into the constant
   as well, so the emitters only have to OR in the register fields. */
typedef enum {
    OPC_J        = 0x02 << 26,
    OPC_JAL      = 0x03 << 26,
    OPC_BEQ      = 0x04 << 26,
    OPC_BNE      = 0x05 << 26,
    OPC_BLEZ     = 0x06 << 26,
    OPC_BGTZ     = 0x07 << 26,
    OPC_ADDIU    = 0x09 << 26,
    OPC_SLTI     = 0x0A << 26,
    OPC_SLTIU    = 0x0B << 26,
    OPC_ANDI     = 0x0C << 26,
    OPC_ORI      = 0x0D << 26,
    OPC_XORI     = 0x0E << 26,
    OPC_LUI      = 0x0F << 26,
    OPC_LB       = 0x20 << 26,
    OPC_LH       = 0x21 << 26,
    OPC_LW       = 0x23 << 26,
    OPC_LBU      = 0x24 << 26,
    OPC_LHU      = 0x25 << 26,
    OPC_LWU      = 0x27 << 26,
    OPC_SB       = 0x28 << 26,
    OPC_SH       = 0x29 << 26,
    OPC_SW       = 0x2B << 26,

    OPC_SPECIAL  = 0x00 << 26,
    OPC_SLL      = OPC_SPECIAL | 0x00,
    OPC_SRL      = OPC_SPECIAL | 0x02,
    OPC_ROTR     = OPC_SPECIAL | (0x01 << 21) | 0x02,
    OPC_SRA      = OPC_SPECIAL | 0x03,
    OPC_SLLV     = OPC_SPECIAL | 0x04,
    OPC_SRLV     = OPC_SPECIAL | 0x06,
    OPC_ROTRV    = OPC_SPECIAL | (0x01 << 6) | 0x06,
    OPC_SRAV     = OPC_SPECIAL | 0x07,
    OPC_JR       = OPC_SPECIAL | 0x08,
    OPC_JALR     = OPC_SPECIAL | 0x09,
    OPC_MOVZ     = OPC_SPECIAL | 0x0A,
    OPC_MOVN     = OPC_SPECIAL | 0x0B,
    OPC_MFHI     = OPC_SPECIAL | 0x10,
    OPC_MFLO     = OPC_SPECIAL | 0x12,
    OPC_MULT     = OPC_SPECIAL | 0x18,
    OPC_MULTU    = OPC_SPECIAL | 0x19,
    OPC_DIV      = OPC_SPECIAL | 0x1A,
    OPC_DIVU     = OPC_SPECIAL | 0x1B,
    OPC_ADDU     = OPC_SPECIAL | 0x21,
    OPC_SUBU     = OPC_SPECIAL | 0x23,
    OPC_AND      = OPC_SPECIAL | 0x24,
    OPC_OR       = OPC_SPECIAL | 0x25,
    OPC_XOR      = OPC_SPECIAL | 0x26,
    OPC_NOR      = OPC_SPECIAL | 0x27,
    OPC_SLT      = OPC_SPECIAL | 0x2A,
    OPC_SLTU     = OPC_SPECIAL | 0x2B,

    OPC_REGIMM   = 0x01 << 26,
    OPC_BLTZ     = OPC_REGIMM | (0x00 << 16),
    OPC_BGEZ     = OPC_REGIMM | (0x01 << 16),

    OPC_SPECIAL2 = 0x1c << 26,
    OPC_MUL      = OPC_SPECIAL2 | 0x002,

    OPC_SPECIAL3 = 0x1f << 26,
    OPC_EXT      = OPC_SPECIAL3 | 0x000,
    OPC_INS      = OPC_SPECIAL3 | 0x004,
    OPC_WSBH     = OPC_SPECIAL3 | 0x0a0,
    OPC_SEB      = OPC_SPECIAL3 | 0x420,
    OPC_SEH      = OPC_SPECIAL3 | 0x620,
} MIPSInsn;
324
325 /*
326 * Type reg
327 */
328 static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
329 TCGReg rd, TCGReg rs, TCGReg rt)
330 {
331 int32_t inst;
332
333 inst = opc;
334 inst |= (rs & 0x1F) << 21;
335 inst |= (rt & 0x1F) << 16;
336 inst |= (rd & 0x1F) << 11;
337 tcg_out32(s, inst);
338 }
339
340 /*
341 * Type immediate
342 */
343 static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
344 TCGReg rt, TCGReg rs, TCGArg imm)
345 {
346 int32_t inst;
347
348 inst = opc;
349 inst |= (rs & 0x1F) << 21;
350 inst |= (rt & 0x1F) << 16;
351 inst |= (imm & 0xffff);
352 tcg_out32(s, inst);
353 }
354
355 /*
356 * Type bitfield
357 */
358 static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
359 TCGReg rs, int msb, int lsb)
360 {
361 int32_t inst;
362
363 inst = opc;
364 inst |= (rs & 0x1F) << 21;
365 inst |= (rt & 0x1F) << 16;
366 inst |= (msb & 0x1F) << 11;
367 inst |= (lsb & 0x1F) << 6;
368 tcg_out32(s, inst);
369 }
370
/*
 * Type branch
 */
/* Emit a branch insn whose displacement will be patched later (via
   reloc_pc16 or a queued relocation). */
static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
                                  TCGReg rt, TCGReg rs)
{
    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again. This ensures that caches and
       memory are kept coherent during retranslation. */
    uint16_t offset = (uint16_t)*s->code_ptr;

    tcg_out_opc_imm(s, opc, rt, rs, offset);
}
384
385 /*
386 * Type sa
387 */
388 static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
389 TCGReg rd, TCGReg rt, TCGArg sa)
390 {
391 int32_t inst;
392
393 inst = opc;
394 inst |= (rt & 0x1F) << 16;
395 inst |= (rd & 0x1F) << 11;
396 inst |= (sa & 0x1F) << 6;
397 tcg_out32(s, inst);
398
399 }
400
/*
 * Type jump.
 * Returns true if the branch was in range and the insn was emitted.
 */
/* Returns false without emitting anything when TARGET lies outside
   the 256MB pc-region of the jump. */
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
{
    uintptr_t dest = (uintptr_t)target;
    uintptr_t from = (uintptr_t)s->code_ptr + 4;
    int32_t inst;

    /* The pc-region branch happens within the 256MB region of
       the delay slot (thus the +4). */
    if ((from ^ dest) & -(1 << 28)) {
        return false;
    }
    /* Jump targets must be word-aligned. */
    assert((dest & 3) == 0);

    inst = opc;
    inst |= (dest >> 2) & 0x3ffffff;
    tcg_out32(s, inst);
    return true;
}
423
/* Emit a nop (all-zero encoding, i.e. sll zero,zero,0). */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, 0);
}
428
/* Register-to-register move, using the canonical
   "addu ret, arg, zero" encoding. */
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (ret != arg) {
        tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
    }
}
437
/* Load constant ARG into REG using the shortest sequence: one insn
   (ADDIU/ORI) when the value fits a 16-bit immediate, else LUI plus
   an optional ORI for the low half. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg reg, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
    } else if (arg == (uint16_t)arg) {
        tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
    } else {
        tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
        if (arg & 0xffff) {
            tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
        }
    }
}
452
/* Byte-swap the low 16 bits of ARG into RET.  NOTE(review): the two
   paths leave different values in bits 31:16 (WSBH swaps within both
   halfwords; the fallback leaves shifted-down high bits) -- callers
   appear to mask or extend afterwards; confirm before relying on the
   upper half. */
static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
    } else {
        /* ret and arg can't be register at (TCG_TMP0), our scratch. */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
469
/* Byte-swap the low 16 bits of ARG and sign-extend the result to 32
   bits in RET. */
static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
    } else {
        /* ret and arg can't be register at (TCG_TMP0), our scratch. */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        /* Low byte to bits 15:8 via TMP0; high byte to bits 7:0 with
           sign extension via SLL 24 + SRA 16. */
        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
487
/* Full 32-bit byte swap of ARG into RET. */
static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        /* Swap bytes within each halfword, then rotate halfwords. */
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
    } else {
        /* ret and arg must be different and can't be register at */
        if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        /* Assemble the result byte by byte, ORing each repositioned
           byte of ARG into RET. */
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00);
        tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
513
514 static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
515 {
516 if (use_mips32r2_instructions) {
517 tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
518 } else {
519 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
520 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
521 }
522 }
523
524 static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
525 {
526 if (use_mips32r2_instructions) {
527 tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
528 } else {
529 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
530 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
531 }
532 }
533
/* Emit load/store OPC of DATA at ADDR + OFS.  When OFS does not fit
   the signed 16-bit displacement, the remainder (ofs - lo) is
   materialized in TCG_TMP0 and added to ADDR, leaving only the
   sign-extended low 16 bits as the instruction's immediate. */
static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
                         TCGReg addr, intptr_t ofs)
{
    int16_t lo = ofs;
    if (ofs != lo) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
        if (addr != TCG_REG_ZERO) {
            tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr);
        }
        addr = TCG_TMP0;
    }
    tcg_out_opc_imm(s, opc, data, addr, lo);
}
547
/* Load a 32-bit word from ARG1 + ARG2 into ARG. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
}
553
/* Store the 32-bit word in ARG to ARG1 + ARG2. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
}
559
/* Add constant VAL to REG in place, using ADDIU when VAL fits a
   signed 16-bit immediate, else via TCG_TMP0. */
static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
{
    if (val == (int16_t)val) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val);
        tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0);
    }
}
569
/* Bit 0 set if inversion required; bit 1 set if swapping required. */
#define MIPS_CMP_INV 1
#define MIPS_CMP_SWAP 2

/* Map an ordering TCGCond to the operand transformation needed to
   realize it with SLT/SLTU (which only compute "less than").
   EQ/NE are not in this table; they are handled separately. */
static const uint8_t mips_cmp_map[16] = {
    [TCG_COND_LT] = 0,
    [TCG_COND_LTU] = 0,
    [TCG_COND_GE] = MIPS_CMP_INV,
    [TCG_COND_GEU] = MIPS_CMP_INV,
    [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_GT] = MIPS_CMP_SWAP,
    [TCG_COND_GTU] = MIPS_CMP_SWAP,
};
584
/* Emit code computing RET = (ARG1 cond ARG2) ? 1 : 0. */
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    MIPSInsn s_opc = OPC_SLTU;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        /* (arg1 ^ arg2) < 1, unsigned. */
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
        break;

    case TCG_COND_NE:
        /* 0 < (arg1 ^ arg2), unsigned. */
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        /* Reduce to SLT/SLTU, swapping operands and/or inverting the
           result as directed by mips_cmp_map. */
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
        if (cmp_map & MIPS_CMP_INV) {
            tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        }
        break;

    default:
        tcg_abort();
        break;
    }
}
636
/* Emit a conditional branch to LABEL_INDEX, taken when
   (ARG1 cond ARG2) holds.  Emits the branch (relocated now or
   queued), followed by a nop in the delay slot. */
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, int label_index)
{
    /* Single-operand compare-against-zero branch encodings. */
    static const MIPSInsn b_zero[16] = {
        [TCG_COND_LT] = OPC_BLTZ,
        [TCG_COND_GT] = OPC_BGTZ,
        [TCG_COND_LE] = OPC_BLEZ,
        [TCG_COND_GE] = OPC_BGEZ,
    };

    TCGLabel *l;
    MIPSInsn s_opc = OPC_SLTU;
    MIPSInsn b_opc;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        b_opc = OPC_BEQ;
        break;
    case TCG_COND_NE:
        b_opc = OPC_BNE;
        break;

    case TCG_COND_LT:
    case TCG_COND_GT:
    case TCG_COND_LE:
    case TCG_COND_GE:
        /* Signed compare with zero has dedicated branch insns. */
        if (arg2 == 0) {
            b_opc = b_zero[cond];
            arg2 = arg1;
            arg1 = 0;
            break;
        }
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GTU:
    case TCG_COND_LEU:
    case TCG_COND_GEU:
        /* Materialize the compare with SLT/SLTU into TMP0, then
           branch on TMP0 being (non-)zero. */
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
        b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
        arg1 = TCG_TMP0;
        arg2 = TCG_REG_ZERO;
        break;

    default:
        tcg_abort();
        break;
    }

    tcg_out_opc_br(s, b_opc, arg1, arg2);
    l = &s->labels[label_index];
    if (l->has_value) {
        /* Label already resolved: patch the branch just emitted. */
        reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
    } else {
        /* Forward reference: patched when the label is defined. */
        tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, label_index, 0);
    }
    tcg_out_nop(s);
}
703
/* Reduce a 64-bit equality comparison AL:AH == BL:BH to a single
   register that is zero iff the pairs are equal; returns that
   register.  TMP0/TMP1 are used as scratch; any of the inputs may be
   register zero (constant 0), which is folded away. */
static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
                                 TCGReg al, TCGReg ah,
                                 TCGReg bl, TCGReg bh)
{
    /* Merge highpart comparison into AH. */
    if (bh != 0) {
        if (ah != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
            ah = tmp0;
        } else {
            ah = bh;
        }
    }
    /* Merge lowpart comparison into AL. */
    if (bl != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
            al = tmp1;
        } else {
            al = bl;
        }
    }
    /* Merge high and low part comparisons into AL. */
    if (ah != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
            al = tmp0;
        } else {
            al = ah;
        }
    }
    return al;
}
737
/* Emit code computing RET = (AL:AH cond BL:BH) ? 1 : 0 for a 64-bit
   comparison on a 32-bit host. */
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    TCGReg tmp0 = TCG_TMP0;
    TCGReg tmp1 = ret;

    /* RET doubles as scratch unless it aliases a high-part input
       still needed later; then fall back to TCG_TMP1. */
    assert(ret != TCG_TMP0);
    if (ret == ah || ret == bh) {
        assert(ret != TCG_TMP1);
        tmp1 = TCG_TMP1;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
        tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
        break;

    default:
        /* Ordering comparison: (ah == bh && al <cond,unsigned> bl)
           || (ah <high-cond> bh). */
        tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
        tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
        tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
        tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
        tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
        break;
    }
}
766
/* Emit a conditional branch to LABEL_INDEX for a 64-bit comparison
   AL:AH cond BL:BH on a 32-bit host. */
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, int label_index)
{
    TCGCond b_cond = TCG_COND_NE;
    TCGReg tmp = TCG_TMP1;

    /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
       With setcond, we emit between 3 and 10 insns and only 1 branch,
       which ought to get better branch prediction. */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        b_cond = cond;
        tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV. */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            b_cond = TCG_COND_EQ;
        }
        tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
        break;
    }

    tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, label_index);
}
795
/* Conditional move: RET = V when (C1 cond C2), else RET unchanged.
   Implemented with MOVN/MOVZ on a materialized condition. */
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, TCGReg c2, TCGReg v)
{
    MIPSInsn m_opc = OPC_MOVN;

    switch (cond) {
    case TCG_COND_EQ:
        m_opc = OPC_MOVZ;
        /* FALLTHRU */
    case TCG_COND_NE:
        /* EQ/NE against non-zero C2: test c1 ^ c2 against zero. */
        if (c2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
            c1 = TCG_TMP0;
        }
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV. */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            m_opc = OPC_MOVZ;
        }
        tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
        c1 = TCG_TMP0;
        break;
    }

    tcg_out_opc_reg(s, m_opc, ret, v, c1);
}
825
/* Emit a call (or, with TAIL, a jump) to ARG.  The delay slot of the
   final branch is NOT filled here; callers emit the delay-slot insn
   themselves (tcg_out_call emits a nop, the ldst slow paths load
   env into A0 there). */
static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    /* Note that the ABI requires the called function's address to be
       loaded into T9, even if a direct branch is in range. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);

    /* But do try a direct branch, allowing the cpu better insn prefetch. */
    if (tail) {
        if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
        }
    } else {
        if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
            tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
        }
    }
}
843
/* Plain call to ARG, with a nop filling the branch delay slot. */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
    tcg_out_nop(s);
}
849
850 #if defined(CONFIG_SOFTMMU)
/* Softmmu slow-path load helpers, indexed by TCGMemOp (size, sign,
   endianness).  Combinations that do not occur are left NULL. */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};
863
/* Softmmu slow-path store helpers, indexed by TCGMemOp (size,
   endianness; stores have no sign variants). */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
873
874 /* Helper routines for marshalling helper function arguments into
875 * the correct registers and stack.
876 * I is where we want to put this argument, and is updated and returned
877 * for the next call. ARG is the argument itself.
878 *
879 * We provide routines for arguments which are: immediate, 32 bit
880 * value in register, 16 and 8 bit values in register (which must be zero
881 * extended before use) and 64 bit value in a lo:hi register pair.
882 */
883
/* Pass ARG as the I'th word-sized call argument: in a register for
   the first four slots, otherwise in the outgoing stack argument
   area at SP + 4*i.  Returns the next free slot index. */
static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
{
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
    } else {
        tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
    }
    return i + 1;
}
893
/* Pass the low 8 bits of ARG (zero-extended) as the I'th argument.
   The mask is done directly in the destination argument register
   when one is available, else via TCG_TMP0. */
static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}
903
/* Pass the low 16 bits of ARG (zero-extended) as the I'th argument;
   see tcg_out_call_iarg_reg8 for the scratch-register choice. */
static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}
913
/* Pass immediate ARG as the I'th argument.  Zero is passed directly
   via the zero register; other values are materialized in the
   destination argument register (or TCG_TMP0 for stack slots). */
static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (arg == 0) {
        tmp = TCG_REG_ZERO;
    } else {
        if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
            tmp = tcg_target_call_iarg_regs[i];
        }
        tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
    }
    return tcg_out_call_iarg_reg(s, i, tmp);
}
927
/* Pass the 64-bit value AL:AH as an argument pair: align I up to an
   even slot first, then order the halves to match host endianness. */
static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
{
    i = (i + 1) & ~1;
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
    return i;
}
935
/* Perform the tlb comparison operation.  The complete host address is
   placed in BASE.  Clobbers AT, T0, A0.  LABEL_PTR receives the
   addresses of the emitted BNE insn(s), which the slow-path code
   later patches to branch to itself on a tlb miss. */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
                             TCGReg addrh, int mem_index, TCGMemOp s_bits,
                             tcg_insn_unit *label_ptr[2], bool is_load)
{
    int cmp_off
        = (is_load
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    /* A0 = &env->tlb_table[mem_index][(addrl >> PAGE_BITS) & MASK]. */
    tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);

    /* Compensate for very large offsets. */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption. */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Load the tlb comparator. */
    tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF);
    }

    /* Mask the page bits, keeping the alignment bits to compare against.
       In between, load the tlb addend for the fast path. */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
    tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);

    label_ptr[0] = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);

    if (TARGET_LONG_BITS == 64) {
        /* delay slot */
        tcg_out_nop(s);

        /* Compare the high part of the address as well. */
        label_ptr[1] = s->code_ptr;
        tcg_out_opc_br(s, OPC_BNE, addrh, base);
    }

    /* delay slot */
    tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
}
994
/* Record a softmmu slow path to be generated later (at the end of the
   TB), capturing everything the slow path needs: operation, data and
   address registers, mem_index, the return address RADDR, and the
   branch insn(s) in LABEL_PTR to patch to the slow-path entry. */
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, void *raddr,
                                tcg_insn_unit *label_ptr[2])
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS == 64) {
        label->label_ptr[1] = label_ptr[1];
    }
}
1016
/* Generate the slow path for a qemu_ld: call the softmmu load helper
   and branch back to the fast path with the result in place. */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGReg v0;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    /* Argument slot 0 is env, loaded into A0 in the call's delay slot
       below; marshalling starts at slot 1. */
    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    i = tcg_out_call_iarg_imm(s, i, l->mem_index);
    i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
    tcg_out_call_int(s, qemu_ld_helpers[opc], false);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);

    v0 = l->datalo_reg;
    if ((opc & MO_SIZE) == MO_64) {
        /* We eliminated V0 from the possible output registers, so it
           cannot be clobbered here.  So we must move V1 first. */
        if (MIPS_BE) {
            tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
            v0 = l->datahi_reg;
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
        }
    }

    /* Unconditional branch back to the fast path, moving the (last)
       result word from V0 in the delay slot. */
    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
}
1058
/* Generate the slow path for a qemu_st: tail-call the softmmu store
   helper, which returns directly to the fast path. */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGMemOp s_bits = opc & MO_SIZE;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    /* Argument slot 0 is env, loaded into A0 in the call's delay slot
       below; marshalling starts at slot 1. */
    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    /* Data is zero-extended to the access size before the call. */
    switch (s_bits) {
    case MO_8:
        i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
        break;
    case MO_16:
        i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
        break;
    case MO_32:
        i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        break;
    case MO_64:
        i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
        break;
    default:
        tcg_abort();
    }
    i = tcg_out_call_iarg_imm(s, i, l->mem_index);

    /* Tail call to the store helper.  Thus force the return address
       computation to take place in the return address register. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
    i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
    tcg_out_call_int(s, qemu_st_helpers[opc], true);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
}
1103 #endif
1104
/* Emit the fast-path memory load of size/endianness OPC from the host
   address in BASE into DATALO (and DATAHI for 64-bit accesses).
   Byte-swapped variants bounce through TCG_TMP1. */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
        break;
    case MO_UW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16(s, datalo, TCG_TMP1);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
        break;
    case MO_SW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16s(s, datalo, TCG_TMP1);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
        break;
    case MO_UL | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        break;
    case MO_UL:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
        break;
    case MO_Q | MO_BSWAP:
        /* Cross-endian 64-bit: the word at HI_OFF supplies the low
           half after swapping, and vice versa. */
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF);
        tcg_out_bswap32(s, datahi, TCG_TMP1);
        break;
    case MO_Q:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
        break;
    default:
        tcg_abort();
    }
}
1150
/* Emit a complete qemu_ld: unpack the operand array, form the host
   address (TLB lookup under softmmu, GUEST_BASE addition otherwise),
   and emit the load itself. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
    int mem_index;
    TCGMemOp s_bits;
#endif
    /* Note that we've eliminated V0 from the output registers,
       so we won't overwrite the base register during loading. */
    TCGReg base = TCG_REG_V0;

    /* Operand layout: data (1 or 2 regs), address (1 or 2 regs), memop. */
    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    opc = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;
    s_bits = opc & MO_SIZE;

    /* The TLB lookup leaves the translated host address in BASE; the
       slow-path label(s) are recorded for the out-of-line helper call. */
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                     s_bits, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 1, opc, data_regl, data_regh, addr_regl, addr_regh,
                        mem_index, s->code_ptr, label_ptr);
#else
    /* User-only: statically add GUEST_BASE.  The guest address register
       can be used directly only when it will not be clobbered by the
       load itself. */
    if (GUEST_BASE == 0 && data_regl != addr_regl) {
        base = addr_regl;
    } else if (GUEST_BASE == (int16_t)GUEST_BASE) {
        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
        tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
#endif
}
1192
/* Emit the actual store for a qemu_st of type OPC to the host address in
   BASE.  Byte-swapped variants swap DATALO/DATAHI through TCG_TMP1
   before storing, leaving the original data registers untouched. */
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc) {
    case MO_8:
        tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
        break;

    case MO_16 | MO_BSWAP:
        /* Mask to 16 bits first so the swap operates on a clean value. */
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff);
        tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_16:
        tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
        break;

    case MO_32 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_32:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
        break;

    case MO_64 | MO_BSWAP:
        /* Swapping the guest endianness also swaps which memory word
           holds the high half: datalo goes to HI_OFF, datahi to LO_OFF. */
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, TCG_TMP1, datahi);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF);
        break;
    case MO_64:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
        break;

    default:
        tcg_abort();
    }
}
1233
/* Emit a double-word add or subtract: rh:rl = ah:al +/- bh:bl.
   CBL/CBH indicate whether BL/BH are constants.  TCG_TMP0 holds the
   carry/borrow; TCG_TMP1 holds the interim high result so that RL may
   alias AH. */
static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
                            bool cbh, bool is_sub)
{
    TCGReg th = TCG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        /* Constant high part: a single ADDIU (negated for subtract). */
        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
    } else {
        /* bh == 0 and ah does not alias rl: no move needed at all. */
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            /* Borrow out of the low part: set if al < bl (unsigned). */
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
        }
        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
    } else {
        if (cbl) {
            /* Carry out of the low part: set if the sum wrapped below bl. */
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
        } else {
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
            /* If rl aliased bl, compare against the surviving operand al. */
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
    }
}
1279
1280 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1281 {
1282 TCGReg addr_regl, addr_regh __attribute__((unused));
1283 TCGReg data_regl, data_regh, base;
1284 TCGMemOp opc;
1285 #if defined(CONFIG_SOFTMMU)
1286 tcg_insn_unit *label_ptr[2];
1287 int mem_index;
1288 TCGMemOp s_bits;
1289 #endif
1290
1291 data_regl = *args++;
1292 data_regh = (is_64 ? *args++ : 0);
1293 addr_regl = *args++;
1294 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1295 opc = *args++;
1296
1297 #if defined(CONFIG_SOFTMMU)
1298 mem_index = *args;
1299 s_bits = opc & 3;
1300
1301 /* Note that we eliminated the helper's address argument,
1302 so we can reuse that for the base. */
1303 base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
1304 tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
1305 s_bits, label_ptr, 1);
1306 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1307 add_qemu_ldst_label(s, 0, opc, data_regl, data_regh, addr_regl, addr_regh,
1308 mem_index, s->code_ptr, label_ptr);
1309 #else
1310 if (GUEST_BASE == 0) {
1311 base = addr_regl;
1312 } else {
1313 base = TCG_REG_A0;
1314 if (GUEST_BASE == (int16_t)GUEST_BASE) {
1315 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
1316 } else {
1317 tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
1318 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1319 }
1320 }
1321 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1322 #endif
1323 }
1324
/* Emit host code for a single TCG opcode.  ARGS holds the operands and
   CONST_ARGS flags which of them are compile-time constants, per the
   constraints declared in mips_op_defs. */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    MIPSInsn i1, i2;
    TCGArg a0, a1, a2;
    int c2;

    /* Most cases use the first three operands and the constness of the
       third; fetch them up front. */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            TCGReg b0 = TCG_REG_ZERO;

            /* Build the return value in V0: any high bits before the
               jump, the low 16 bits OR'ed in from the delay slot. */
            if (a0 & ~0xffff) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
                b0 = TCG_REG_V0;
            }
            if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
                /* tb_ret_addr not encodable in a J instruction;
                   jump through a register instead. */
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
                             (uintptr_t)tb_ret_addr);
                tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
            }
            /* delay slot */
            tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
            /* Avoid clobbering the address during retranslation. */
            tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
        } else {
            /* indirect jump method */
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                       (uintptr_t)(s->tb_next + a0));
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
        }
        /* delay slot */
        tcg_out_nop(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        /* Unconditional branch: $zero compared against itself is
           always equal. */
        tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, a0);
        break;

    /* Loads and stores share a single emission tail; only the opcode
       differs. */
    case INDEX_op_ld8u_i32:
        i1 = OPC_LBU;
        goto do_ldst;
    case INDEX_op_ld8s_i32:
        i1 = OPC_LB;
        goto do_ldst;
    case INDEX_op_ld16u_i32:
        i1 = OPC_LHU;
        goto do_ldst;
    case INDEX_op_ld16s_i32:
        i1 = OPC_LH;
        goto do_ldst;
    case INDEX_op_ld_i32:
        i1 = OPC_LW;
        goto do_ldst;
    case INDEX_op_st8_i32:
        i1 = OPC_SB;
        goto do_ldst;
    case INDEX_op_st16_i32:
        i1 = OPC_SH;
        goto do_ldst;
    case INDEX_op_st_i32:
        i1 = OPC_SW;
    do_ldst:
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

    /* Binary ops: i1 is the register form, i2 the immediate form. */
    case INDEX_op_add_i32:
        i1 = OPC_ADDU, i2 = OPC_ADDIU;
        goto do_binary;
    case INDEX_op_or_i32:
        i1 = OPC_OR, i2 = OPC_ORI;
        goto do_binary;
    case INDEX_op_xor_i32:
        i1 = OPC_XOR, i2 = OPC_XORI;
    do_binary:
        if (c2) {
            tcg_out_opc_imm(s, i2, a0, a1, a2);
            break;
        }
    do_binaryv:
        tcg_out_opc_reg(s, i1, a0, a1, a2);
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            /* There is no SUBIU; add the negated constant. */
            tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2);
            break;
        }
        /* i2 is unused here: the constant case was just handled. */
        i1 = OPC_SUBU;
        goto do_binary;
    case INDEX_op_and_i32:
        if (c2 && a2 != (uint16_t)a2) {
            /* A constant mask that doesn't fit ANDI's 16-bit immediate:
               the 'K' constraint guarantees it is 2**n - 1, so it can
               be implemented as a bitfield extract. */
            int msb = ctz32(~a2) - 1;
            assert(use_mips32r2_instructions);
            assert(is_p2m1(a2));
            tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
            break;
        }
        i1 = OPC_AND, i2 = OPC_ANDI;
        goto do_binary;
    case INDEX_op_nor_i32:
        /* Register operands only: there is no NOR immediate form. */
        i1 = OPC_NOR;
        goto do_binaryv;

    case INDEX_op_mul_i32:
        if (use_mips32_instructions) {
            /* MIPS32 has a 3-operand multiply; otherwise fall back to
               MULT + MFLO. */
            tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFLO;
        goto do_hilo1;
    /* Multiply/divide through the HI/LO registers: i1 computes, i2
       moves the desired half into the destination. */
    case INDEX_op_mulsh_i32:
        i1 = OPC_MULT, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_muluh_i32:
        i1 = OPC_MULTU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_div_i32:
        i1 = OPC_DIV, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_divu_i32:
        i1 = OPC_DIVU, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_rem_i32:
        i1 = OPC_DIV, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_remu_i32:
        i1 = OPC_DIVU, i2 = OPC_MFHI;
    do_hilo1:
        tcg_out_opc_reg(s, i1, 0, a1, a2);
        tcg_out_opc_reg(s, i2, a0, 0, 0);
        break;

    case INDEX_op_muls2_i32:
        i1 = OPC_MULT;
        goto do_hilo2;
    case INDEX_op_mulu2_i32:
        i1 = OPC_MULTU;
    do_hilo2:
        /* Double-result multiply: low part to a0, high part to a1. */
        tcg_out_opc_reg(s, i1, 0, a2, args[3]);
        tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
        tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
        break;

    /* Unary ops, all emitted with $zero as the unused first source.
       For NOR this yields NOT (nor a0, $zero, a1). */
    case INDEX_op_not_i32:
        i1 = OPC_NOR;
        goto do_unary;
    case INDEX_op_bswap16_i32:
        i1 = OPC_WSBH;
        goto do_unary;
    case INDEX_op_ext8s_i32:
        i1 = OPC_SEB;
        goto do_unary;
    case INDEX_op_ext16s_i32:
        i1 = OPC_SEH;
    do_unary:
        tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
        break;

    /* Shifts: i1 is the variable form, i2 the immediate form. */
    case INDEX_op_sar_i32:
        i1 = OPC_SRAV, i2 = OPC_SRA;
        goto do_shift;
    case INDEX_op_shl_i32:
        i1 = OPC_SLLV, i2 = OPC_SLL;
        goto do_shift;
    case INDEX_op_shr_i32:
        i1 = OPC_SRLV, i2 = OPC_SRL;
        goto do_shift;
    case INDEX_op_rotr_i32:
        i1 = OPC_ROTRV, i2 = OPC_ROTR;
    do_shift:
        if (c2) {
            tcg_out_opc_sa(s, i2, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, i1, a0, a2, a1);
        }
        break;
    case INDEX_op_rotl_i32:
        /* No rotate-left instruction: rotate right by 32 - N (or by the
           negated count for the variable form). */
        if (c2) {
            tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* Swap the bytes within each halfword, then swap the halfwords. */
        tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1);
        tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16);
        break;

    case INDEX_op_deposit_i32:
        /* INS takes the msb/lsb of the destination field; args[3] is the
           field position, args[4] its length. */
        tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, a2, a0, a1, args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* Double-word compare-and-branch: cond in args[4], label in
           args[5]. */
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], args[5]);
        break;

    case INDEX_op_movcond_i32:
        /* The false value (args[4]) is tied to the output by the "0"
           constraint, so only the true value args[3] is passed. */
        tcg_out_movcond(s, args[5], a0, a1, a2, args[3]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
1576
/* Operand constraint table: for each opcode, one constraint string per
   output and input operand, consumed by the common TCG register
   allocator.  The single-letter codes are decoded by this backend's
   constraint parser (not visible here).  Terminated by { -1 }. */
static const TCGTargetOpDef mips_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rN" } },

    { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_not_i32, { "r", "rZ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "rZ" } },
    { INDEX_op_ext16s_i32, { "r", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },

    /* The number of address operands for qemu_ld/st depends on the
       guest pointer width. */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
#else
    { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
#endif
    { -1 },
};
1645
1646 static int tcg_target_callee_save_regs[] = {
1647 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
1648 TCG_REG_S1,
1649 TCG_REG_S2,
1650 TCG_REG_S3,
1651 TCG_REG_S4,
1652 TCG_REG_S5,
1653 TCG_REG_S6,
1654 TCG_REG_S7,
1655 TCG_REG_S8,
1656 TCG_REG_RA, /* should be last for ABI compliance */
1657 };
1658
/* The Linux kernel doesn't provide any information about the available
   instruction set. Probe it using a signal handler. */

#include <signal.h>

/* Each use_* flag may already be provided as a compile-time macro
   (presumably by tcg-target.h); the runtime-probed variable below is
   only defined when it is not. */
#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

/* Set by sigill_handler when a probe instruction faults. */
static volatile sig_atomic_t got_sigill;
1677
1678 static void sigill_handler(int signo, siginfo_t *si, void *data)
1679 {
1680 /* Skip the faulty instruction */
1681 ucontext_t *uc = (ucontext_t *)data;
1682 uc->uc_mcontext.pc += 4;
1683
1684 got_sigill = 1;
1685 }
1686
/* Runtime ISA detection: execute one representative instruction from
   each optional instruction set under a temporary SIGILL handler and
   record whether it trapped. */
static void tcg_target_detect_isa(void)
{
    struct sigaction sa_old, sa_new;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_flags = SA_SIGINFO;
    sa_new.sa_sigaction = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* Probe for movn/movz, necessary to implement movcond. */
#ifndef use_movnz_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "movn $zero, $zero, $zero\n"
                 "movz $zero, $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_movnz_instructions = !got_sigill;
#endif

    /* Probe for MIPS32 instructions. As no subsetting is allowed
       by the specification, it is only necessary to probe for one
       of the instructions. */
#ifndef use_mips32_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "mul $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_mips32_instructions = !got_sigill;
#endif

    /* Probe for MIPS32r2 instructions if MIPS32 instructions are
       available. As no subsetting is allowed by the specification,
       it is only necessary to probe for one of the instructions. */
#ifndef use_mips32r2_instructions
    if (use_mips32_instructions) {
        got_sigill = 0;
        asm volatile(".set push\n"
                     ".set mips32r2\n"
                     "seb $zero, $zero\n"
                     ".set pop\n"
                     : : : );
        use_mips32r2_instructions = !got_sigill;
    }
#endif

    /* Restore the original SIGILL disposition. */
    sigaction(SIGILL, &sa_old, NULL);
}
1738
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size;

    /* reserve some stack space, also for TCG temps. */
    frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                 + TCG_STATIC_CALL_ARGS_SIZE
                 + CPU_TEMP_BUF_NLONGS * sizeof(long);
    /* Round up to the required stack alignment. */
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                 ~(TCG_TARGET_STACK_ALIGN - 1);
    /* The TCG temp buffer sits above the static call-argument area and
       the callee-saved register save area. */
    tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                  + TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* TB prologue */
    tcg_out_addi(s, TCG_REG_SP, -frame_size);
    /* Save the callee-saved registers above the outgoing-argument area. */
    for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
        tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    /* Call generated code */
    tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
    /* delay slot: copy the env argument into TCG_AREG0 */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* INDEX_op_exit_tb jumps back to this point. */
    tb_ret_addr = s->code_ptr;

    /* TB epilogue */
    for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
        tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    /* delay slot: pop the frame */
    tcg_out_addi(s, TCG_REG_SP, frame_size);
}
1775
/* Target-specific initialization: probe the host ISA, declare the
   available/call-clobbered/reserved register sets, and register the
   opcode constraint table. */
static void tcg_target_init(TCGContext *s)
{
    tcg_target_detect_isa();
    /* All 32 integer registers are usable for TCG_TYPE_I32 values. */
    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
    /* Caller-saved registers: v0-v1, a0-a3, t0-t9. */
    tcg_regset_set(tcg_target_call_clobber_regs,
                   (1 << TCG_REG_V0) |
                   (1 << TCG_REG_V1) |
                   (1 << TCG_REG_A0) |
                   (1 << TCG_REG_A1) |
                   (1 << TCG_REG_A2) |
                   (1 << TCG_REG_A3) |
                   (1 << TCG_REG_T0) |
                   (1 << TCG_REG_T1) |
                   (1 << TCG_REG_T2) |
                   (1 << TCG_REG_T3) |
                   (1 << TCG_REG_T4) |
                   (1 << TCG_REG_T5) |
                   (1 << TCG_REG_T6) |
                   (1 << TCG_REG_T7) |
                   (1 << TCG_REG_T8) |
                   (1 << TCG_REG_T9));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP1);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);   /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);   /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);   /* global pointer */

    tcg_add_target_add_op_defs(mips_op_defs);
}
1810
/* Patch the direct jump instruction at JMP_ADDR (the OPC_J emitted for
   INDEX_op_goto_tb) so that it targets ADDR, then flush the icache for
   the modified word. */
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *insn = (uint32_t *)jmp_addr;

    /* Rewrite the low 26 bits -- the J-format target field -- with the
       word address of the destination. */
    *insn = deposit32(*insn, 0, 26, addr >> 2);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}