qemu.git: tcg/arm/tcg-target.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    *(uint32_t *) code_ptr = target;
}

static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
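
/* Worked example (illustrative values, not from the original source):
 * imm = 0x00ff0000 has ctz32(imm) == 16, so the "simple even shift"
 * case hits and encode_imm() returns 32 - 16 = 16.  Callers then emit
 * rotl(0x00ff0000, 16) = 0xff as the 8-bit payload together with a
 * rotate field of 16 >> 1 = 8, and the CPU recovers the constant as
 * 0xff ror 16 == 0x00ff0000.
 */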

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset: between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2: values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}

enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
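/* Note (added for clarity): CMP, CMN and TST have no destination and are
 * only useful for their flag results, so TO_CPSR() sets bit 20 (the S bit)
 * for exactly those opcodes; e.g. TO_CPSR(ARITH_CMP) == 1 << 20, while
 * TO_CPSR(ARITH_ADD) == 0.
 */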

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
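/* Note (added for clarity): these macros build the shifter-operand field of
 * a data-processing instruction: bits 11:7 hold an immediate shift amount
 * (or bits 11:8 a shift register), bits 6:5 select the shift type, and bit 4
 * distinguishes register-specified shifts.  For example,
 * SHIFT_IMM_LSR(16) == (16 << 7) | 0x20 == 0x820, i.e. "rm, lsr #16".
 */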

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
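
/* Note (added for clarity): the "- 8" above accounts for the ARM pipeline:
 * when a branch executes, PC already reads as the branch's own address plus
 * 8, so a byte offset relative to the current instruction must be reduced
 * by 8 before being scaled to a word offset for the 24-bit field.  The same
 * adjustment appears in tcg_out_bl(), tcg_out_blx_imm() and reloc_pc24().
 */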

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here not to modify the branch target by skipping
       the corresponding bytes. This ensures that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}

static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, uint32_t arg)
{
    /* TODO: This is very suboptimal; we could easily have a constant
     * pool somewhere after all the instructions. */
    if ((int)arg < 0 && (int)arg >= -0x100) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
    } else if (use_armv7_instructions) {
        /* use movw/movt */
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
    } else {
        int opc = ARITH_MOV;
        int rn = 0;

        do {
            int i, rot;

            i = ctz32(arg) & ~1;
            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
            arg &= ~(0xff << i);

            opc = ARITH_ORR;
            rn = rd;
        } while (arg);
    }
}
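
/* Worked example for the pre-ARMv7 path (illustrative values, not from the
 * original source): arg = 0x00ff00ff is emitted as two instructions.  The
 * first pass finds i = 0 and produces "mov rd, #0xff"; the second pass
 * finds i = 16, so rot = ((32 - 16) << 7) & 0xf00 = 0x800 (rotate field 8,
 * i.e. ror 16) and produces "orr rd, rd, #0x00ff0000", after which arg is 0
 * and the loop stops.
 */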

static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
    }
}
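
/* Walkthrough of the pre-ARMv6 sequence above (added for clarity, with an
 * illustrative value): for rn = 0xAABBCCDD it computes
 *   r8 = rn ^ ror(rn, 16)      -> each byte holds A^C or B^D
 *   bic r8, r8, #0x00ff0000    -> the immediate 0xff | 0x800 is 0xff ror 16
 *   rd = ror(rn, 8)            -> 0xDDAABBCC
 *   rd = rd ^ (r8 >> 8)        -> 0xDDCCBBAA
 * which is the standard 4-instruction byte swap.
 */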

static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/* The _goto case is normally between TBs within the same code buffer,
 * and with the code buffer limited to 16MB we shouldn't need the long
 * case.
 *
 * .... except for jumps to the prologue, which lives in its own buffer.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    if (addr & 1) {
        /* goto to a Thumb destination isn't supported */
        tcg_abort();
    }

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr);
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
    }
}

/* The call case is mostly used for helpers, so it's not unreasonable
 * for them to be beyond branch range. */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}

#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
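/* Note (added for clarity): mem_index must be scaled by 1 << TLB_SHIFT when
 * indexing tlb_table, and the qemu_ld/st paths below encode that product
 * directly as a rotated ALU immediate: the 8-bit payload is
 * mem_index << (TLB_SHIFT & 1), and the rotate field 16 - (TLB_SHIFT >> 1)
 * rotates it left by the remaining even amount, giving
 * mem_index << TLB_SHIFT.  This assumes the payload still fits in 8 bits.
 */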

static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
#  error
# endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * unlikely to otherwise, so emit an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * beforehand when needed.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constant pool will be */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);

    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        break;
    case 3:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
        }
        break;
    }

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}

static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * unlikely to otherwise, so emit an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * beforehand when needed.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constant pool will be */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# endif

    tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
    if (opc == 3)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}

static uint8_t *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr;
     * also save r12 to keep the stack 8-byte aligned.
     */

    /* stmdb sp!, { r4 - r12, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d5ff0);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r12, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd9ff0);
}
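
/* Note (added for clarity; the calling convention is inferred from the code
 * above): the prologue is entered with the CPU state pointer in r0 (the
 * first argument register, moved into TCG_AREG0) and the address of the
 * translated code to run in r1, which it jumps to with bx.  Generated code
 * eventually branches back to tb_ret_addr, where the epilogue restores
 * r4-r12 and returns to the caller by loading pc.
 */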