[qemu.git] / target / xtensa / translate.c
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "qemu/osdep.h"
32
33 #include "cpu.h"
34 #include "exec/exec-all.h"
35 #include "disas/disas.h"
36 #include "tcg/tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/qemu-print.h"
39 #include "exec/cpu_ldst.h"
40 #include "hw/semihosting/semihost.h"
41 #include "exec/translator.h"
42
43 #include "exec/helper-proto.h"
44 #include "exec/helper-gen.h"
45
46 #include "trace-tcg.h"
47 #include "exec/log.h"
48
49
50 struct DisasContext {
51 DisasContextBase base;
52 const XtensaConfig *config;
53 uint32_t pc;
54 int cring;
55 int ring;
56 uint32_t lbeg_off;
57 uint32_t lend;
58
59 bool sar_5bit;
60 bool sar_m32_5bit;
61 bool sar_m32_allocated;
62 TCGv_i32 sar_m32;
63
64 unsigned window;
65 unsigned callinc;
66 bool cwoe;
67
68 bool debug;
69 bool icount;
70 TCGv_i32 next_icount;
71
72 unsigned cpenable;
73
74 uint32_t op_flags;
75 xtensa_insnbuf insnbuf;
76 xtensa_insnbuf slotbuf;
77 };
78
79 static TCGv_i32 cpu_pc;
80 static TCGv_i32 cpu_R[16];
81 static TCGv_i32 cpu_FR[16];
82 static TCGv_i32 cpu_MR[4];
83 static TCGv_i32 cpu_BR[16];
84 static TCGv_i32 cpu_BR4[4];
85 static TCGv_i32 cpu_BR8[2];
86 static TCGv_i32 cpu_SR[256];
87 static TCGv_i32 cpu_UR[256];
88 static TCGv_i32 cpu_windowbase_next;
89 static TCGv_i32 cpu_exclusive_addr;
90 static TCGv_i32 cpu_exclusive_val;
91
92 static GHashTable *xtensa_regfile_table;
93
94 #include "exec/gen-icount.h"
95
96 static char *sr_name[256];
97 static char *ur_name[256];
98
99 void xtensa_collect_sr_names(const XtensaConfig *config)
100 {
101 xtensa_isa isa = config->isa;
102 int n = xtensa_isa_num_sysregs(isa);
103 int i;
104
105 for (i = 0; i < n; ++i) {
106 int sr = xtensa_sysreg_number(isa, i);
107
108 if (sr >= 0 && sr < 256) {
109 const char *name = xtensa_sysreg_name(isa, i);
110 char **pname =
111 (xtensa_sysreg_is_user(isa, i) ? ur_name : sr_name) + sr;
112
113 if (*pname) {
114 if (strstr(*pname, name) == NULL) {
115 char *new_name =
116 malloc(strlen(*pname) + strlen(name) + 2);
117
118 strcpy(new_name, *pname);
119 strcat(new_name, "/");
120 strcat(new_name, name);
121 free(*pname);
122 *pname = new_name;
123 }
124 } else {
125 *pname = strdup(name);
126 }
127 }
128 }
129 }
130
131 void xtensa_translate_init(void)
132 {
133 static const char * const regnames[] = {
134 "ar0", "ar1", "ar2", "ar3",
135 "ar4", "ar5", "ar6", "ar7",
136 "ar8", "ar9", "ar10", "ar11",
137 "ar12", "ar13", "ar14", "ar15",
138 };
139 static const char * const fregnames[] = {
140 "f0", "f1", "f2", "f3",
141 "f4", "f5", "f6", "f7",
142 "f8", "f9", "f10", "f11",
143 "f12", "f13", "f14", "f15",
144 };
145 static const char * const mregnames[] = {
146 "m0", "m1", "m2", "m3",
147 };
148 static const char * const bregnames[] = {
149 "b0", "b1", "b2", "b3",
150 "b4", "b5", "b6", "b7",
151 "b8", "b9", "b10", "b11",
152 "b12", "b13", "b14", "b15",
153 };
154 int i;
155
156 cpu_pc = tcg_global_mem_new_i32(cpu_env,
157 offsetof(CPUXtensaState, pc), "pc");
158
159 for (i = 0; i < 16; i++) {
160 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
161 offsetof(CPUXtensaState, regs[i]),
162 regnames[i]);
163 }
164
165 for (i = 0; i < 16; i++) {
166 cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
167 offsetof(CPUXtensaState,
168 fregs[i].f32[FP_F32_LOW]),
169 fregnames[i]);
170 }
171
172 for (i = 0; i < 4; i++) {
173 cpu_MR[i] = tcg_global_mem_new_i32(cpu_env,
174 offsetof(CPUXtensaState,
175 sregs[MR + i]),
176 mregnames[i]);
177 }
178
179 for (i = 0; i < 16; i++) {
180 cpu_BR[i] = tcg_global_mem_new_i32(cpu_env,
181 offsetof(CPUXtensaState,
182 sregs[BR]),
183 bregnames[i]);
184 if (i % 4 == 0) {
185 cpu_BR4[i / 4] = tcg_global_mem_new_i32(cpu_env,
186 offsetof(CPUXtensaState,
187 sregs[BR]),
188 bregnames[i]);
189 }
190 if (i % 8 == 0) {
191 cpu_BR8[i / 8] = tcg_global_mem_new_i32(cpu_env,
192 offsetof(CPUXtensaState,
193 sregs[BR]),
194 bregnames[i]);
195 }
196 }
197
198 for (i = 0; i < 256; ++i) {
199 if (sr_name[i]) {
200 cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
201 offsetof(CPUXtensaState,
202 sregs[i]),
203 sr_name[i]);
204 }
205 }
206
207 for (i = 0; i < 256; ++i) {
208 if (ur_name[i]) {
209 cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
210 offsetof(CPUXtensaState,
211 uregs[i]),
212 ur_name[i]);
213 }
214 }
215
216 cpu_windowbase_next =
217 tcg_global_mem_new_i32(cpu_env,
218 offsetof(CPUXtensaState, windowbase_next),
219 "windowbase_next");
220 cpu_exclusive_addr =
221 tcg_global_mem_new_i32(cpu_env,
222 offsetof(CPUXtensaState, exclusive_addr),
223 "exclusive_addr");
224 cpu_exclusive_val =
225 tcg_global_mem_new_i32(cpu_env,
226 offsetof(CPUXtensaState, exclusive_val),
227 "exclusive_val");
228 }
229
230 void **xtensa_get_regfile_by_name(const char *name)
231 {
232 if (xtensa_regfile_table == NULL) {
233 xtensa_regfile_table = g_hash_table_new(g_str_hash, g_str_equal);
234 g_hash_table_insert(xtensa_regfile_table,
235 (void *)"AR", (void *)cpu_R);
236 g_hash_table_insert(xtensa_regfile_table,
237 (void *)"MR", (void *)cpu_MR);
238 g_hash_table_insert(xtensa_regfile_table,
239 (void *)"FR", (void *)cpu_FR);
240 g_hash_table_insert(xtensa_regfile_table,
241 (void *)"BR", (void *)cpu_BR);
242 g_hash_table_insert(xtensa_regfile_table,
243 (void *)"BR4", (void *)cpu_BR4);
244 g_hash_table_insert(xtensa_regfile_table,
245 (void *)"BR8", (void *)cpu_BR8);
246 }
247 return (void **)g_hash_table_lookup(xtensa_regfile_table, (void *)name);
248 }
249
250 static inline bool option_enabled(DisasContext *dc, int opt)
251 {
252 return xtensa_option_enabled(dc->config, opt);
253 }
254
255 static void init_sar_tracker(DisasContext *dc)
256 {
257 dc->sar_5bit = false;
258 dc->sar_m32_5bit = false;
259 dc->sar_m32_allocated = false;
260 }
261
262 static void reset_sar_tracker(DisasContext *dc)
263 {
264 if (dc->sar_m32_allocated) {
265 tcg_temp_free(dc->sar_m32);
266 }
267 }
268
269 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
270 {
271 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
272 if (dc->sar_m32_5bit) {
273 tcg_gen_discard_i32(dc->sar_m32);
274 }
275 dc->sar_5bit = true;
276 dc->sar_m32_5bit = false;
277 }
278
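/*
 * For a left shift SAR is set to 32 - (sa & 0x1f), the complementary
 * right-shift amount used by the funnel shifts; the masked 5-bit
 * amount itself is cached in sar_m32 so it can be reused directly.
 */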
279 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
280 {
281 TCGv_i32 tmp = tcg_const_i32(32);
282 if (!dc->sar_m32_allocated) {
283 dc->sar_m32 = tcg_temp_local_new_i32();
284 dc->sar_m32_allocated = true;
285 }
286 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
287 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
288 dc->sar_5bit = false;
289 dc->sar_m32_5bit = true;
290 tcg_temp_free(tmp);
291 }
292
293 static void gen_exception(DisasContext *dc, int excp)
294 {
295 TCGv_i32 tmp = tcg_const_i32(excp);
296 gen_helper_exception(cpu_env, tmp);
297 tcg_temp_free(tmp);
298 }
299
300 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
301 {
302 TCGv_i32 tpc = tcg_const_i32(dc->pc);
303 TCGv_i32 tcause = tcg_const_i32(cause);
304 gen_helper_exception_cause(cpu_env, tpc, tcause);
305 tcg_temp_free(tpc);
306 tcg_temp_free(tcause);
307 if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
308 cause == SYSCALL_CAUSE) {
309 dc->base.is_jmp = DISAS_NORETURN;
310 }
311 }
312
313 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
314 TCGv_i32 vaddr)
315 {
316 TCGv_i32 tpc = tcg_const_i32(dc->pc);
317 TCGv_i32 tcause = tcg_const_i32(cause);
318 gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
319 tcg_temp_free(tpc);
320 tcg_temp_free(tcause);
321 }
322
323 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
324 {
325 TCGv_i32 tpc = tcg_const_i32(dc->pc);
326 TCGv_i32 tcause = tcg_const_i32(cause);
327 gen_helper_debug_exception(cpu_env, tpc, tcause);
328 tcg_temp_free(tpc);
329 tcg_temp_free(tcause);
330 if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
331 dc->base.is_jmp = DISAS_NORETURN;
332 }
333 }
334
335 static bool gen_check_privilege(DisasContext *dc)
336 {
337 #ifndef CONFIG_USER_ONLY
338 if (!dc->cring) {
339 return true;
340 }
341 #endif
342 gen_exception_cause(dc, PRIVILEGED_CAUSE);
343 dc->base.is_jmp = DISAS_NORETURN;
344 return false;
345 }
346
347 static bool gen_check_cpenable(DisasContext *dc, uint32_t cp_mask)
348 {
349 cp_mask &= ~dc->cpenable;
350
351 if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) && cp_mask) {
352 gen_exception_cause(dc, COPROCESSOR0_DISABLED + ctz32(cp_mask));
353 dc->base.is_jmp = DISAS_NORETURN;
354 return false;
355 }
356 return true;
357 }
358
359 static int gen_postprocess(DisasContext *dc, int slot);
360
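/*
 * Finish the TB with a jump to dest.  A non-negative slot selects the
 * goto_tb slot used for direct block chaining; a negative slot forces
 * an indirect exit via exit_tb(NULL, 0).
 */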
361 static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
362 {
363 tcg_gen_mov_i32(cpu_pc, dest);
364 if (dc->icount) {
365 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
366 }
367 if (dc->base.singlestep_enabled) {
368 gen_exception(dc, EXCP_DEBUG);
369 } else {
370 if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
371 slot = gen_postprocess(dc, slot);
372 }
373 if (slot >= 0) {
374 tcg_gen_goto_tb(slot);
375 tcg_gen_exit_tb(dc->base.tb, slot);
376 } else {
377 tcg_gen_exit_tb(NULL, 0);
378 }
379 }
380 dc->base.is_jmp = DISAS_NORETURN;
381 }
382
383 static void gen_jump(DisasContext *dc, TCGv dest)
384 {
385 gen_jump_slot(dc, dest, -1);
386 }
387
388 static int adjust_jump_slot(DisasContext *dc, uint32_t dest, int slot)
389 {
390 if (((dc->base.pc_first ^ dest) & TARGET_PAGE_MASK) != 0) {
391 return -1;
392 } else {
393 return slot;
394 }
395 }
396
397 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
398 {
399 TCGv_i32 tmp = tcg_const_i32(dest);
400 gen_jump_slot(dc, tmp, adjust_jump_slot(dc, dest, slot));
401 tcg_temp_free(tmp);
402 }
403
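/*
 * Windowed call: record the window increment in PS.CALLINC and store
 * the return address in AR[callinc * 4] (the register that becomes a0
 * once the window rotates), with the increment encoded in its two
 * topmost bits.
 */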
404 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
405 int slot)
406 {
407 TCGv_i32 tcallinc = tcg_const_i32(callinc);
408
409 tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
410 tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
411 tcg_temp_free(tcallinc);
412 tcg_gen_movi_i32(cpu_R[callinc << 2],
413 (callinc << 30) | (dc->base.pc_next & 0x3fffffff));
414 gen_jump_slot(dc, dest, slot);
415 }
416
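/*
 * Zero-overhead loop support: when the fall-through PC reaches LEND
 * and LCOUNT is non-zero, decrement LCOUNT and branch back to the loop
 * start (pc_next - lbeg_off when the offset is known, LBEG otherwise).
 */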
417 static bool gen_check_loop_end(DisasContext *dc, int slot)
418 {
419 if (dc->base.pc_next == dc->lend) {
420 TCGLabel *label = gen_new_label();
421
422 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
423 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
424 if (dc->lbeg_off) {
425 gen_jumpi(dc, dc->base.pc_next - dc->lbeg_off, slot);
426 } else {
427 gen_jump(dc, cpu_SR[LBEG]);
428 }
429 gen_set_label(label);
430 gen_jumpi(dc, dc->base.pc_next, -1);
431 return true;
432 }
433 return false;
434 }
435
436 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
437 {
438 if (!gen_check_loop_end(dc, slot)) {
439 gen_jumpi(dc, dc->base.pc_next, slot);
440 }
441 }
442
443 static void gen_brcond(DisasContext *dc, TCGCond cond,
444 TCGv_i32 t0, TCGv_i32 t1, uint32_t addr)
445 {
446 TCGLabel *label = gen_new_label();
447
448 tcg_gen_brcond_i32(cond, t0, t1, label);
449 gen_jumpi_check_loop_end(dc, 0);
450 gen_set_label(label);
451 gen_jumpi(dc, addr, 1);
452 }
453
454 static void gen_brcondi(DisasContext *dc, TCGCond cond,
455 TCGv_i32 t0, uint32_t t1, uint32_t addr)
456 {
457 TCGv_i32 tmp = tcg_const_i32(t1);
458 gen_brcond(dc, cond, t0, tmp, addr);
459 tcg_temp_free(tmp);
460 }
461
462 static bool test_ill_sr(DisasContext *dc, const OpcodeArg arg[],
463 const uint32_t par[])
464 {
465 return !xtensa_option_enabled(dc->config, par[1]);
466 }
467
468 static bool test_ill_ccompare(DisasContext *dc, const OpcodeArg arg[],
469 const uint32_t par[])
470 {
471 unsigned n = par[0] - CCOMPARE;
472
473 return test_ill_sr(dc, arg, par) || n >= dc->config->nccompare;
474 }
475
476 static bool test_ill_dbreak(DisasContext *dc, const OpcodeArg arg[],
477 const uint32_t par[])
478 {
479 unsigned n = MAX_NDBREAK;
480
481 if (par[0] >= DBREAKA && par[0] < DBREAKA + MAX_NDBREAK) {
482 n = par[0] - DBREAKA;
483 }
484 if (par[0] >= DBREAKC && par[0] < DBREAKC + MAX_NDBREAK) {
485 n = par[0] - DBREAKC;
486 }
487 return test_ill_sr(dc, arg, par) || n >= dc->config->ndbreak;
488 }
489
490 static bool test_ill_ibreak(DisasContext *dc, const OpcodeArg arg[],
491 const uint32_t par[])
492 {
493 unsigned n = par[0] - IBREAKA;
494
495 return test_ill_sr(dc, arg, par) || n >= dc->config->nibreak;
496 }
497
498 static bool test_ill_hpi(DisasContext *dc, const OpcodeArg arg[],
499 const uint32_t par[])
500 {
501 unsigned n = MAX_NLEVEL + 1;
502
503 if (par[0] >= EXCSAVE1 && par[0] < EXCSAVE1 + MAX_NLEVEL) {
504 n = par[0] - EXCSAVE1 + 1;
505 }
506 if (par[0] >= EPC1 && par[0] < EPC1 + MAX_NLEVEL) {
507 n = par[0] - EPC1 + 1;
508 }
509 if (par[0] >= EPS2 && par[0] < EPS2 + MAX_NLEVEL - 1) {
510 n = par[0] - EPS2 + 2;
511 }
512 return test_ill_sr(dc, arg, par) || n > dc->config->nlevel;
513 }
514
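/*
 * Alignment handling: without the unaligned exception option the low
 * address bits are simply masked off.  Otherwise, on cores with the
 * hardware alignment option, accesses flagged no_hw_alignment raise
 * LoadStoreAlignmentCause when the address is misaligned.
 */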
515 static void gen_load_store_alignment(DisasContext *dc, int shift,
516 TCGv_i32 addr, bool no_hw_alignment)
517 {
518 if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
519 tcg_gen_andi_i32(addr, addr, ~0 << shift);
520 } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
521 no_hw_alignment) {
522 TCGLabel *label = gen_new_label();
523 TCGv_i32 tmp = tcg_temp_new_i32();
524 tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
525 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
526 gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
527 gen_set_label(label);
528 tcg_temp_free(tmp);
529 }
530 }
531
532 #ifndef CONFIG_USER_ONLY
533 static void gen_waiti(DisasContext *dc, uint32_t imm4)
534 {
535 TCGv_i32 pc = tcg_const_i32(dc->base.pc_next);
536 TCGv_i32 intlevel = tcg_const_i32(imm4);
537
538 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
539 gen_io_start();
540 }
541 gen_helper_waiti(cpu_env, pc, intlevel);
542 tcg_temp_free(pc);
543 tcg_temp_free(intlevel);
544 }
545 #endif
546
547 static bool gen_window_check(DisasContext *dc, uint32_t mask)
548 {
549 unsigned r = 31 - clz32(mask);
550
551 if (r / 4 > dc->window) {
552 TCGv_i32 pc = tcg_const_i32(dc->pc);
553 TCGv_i32 w = tcg_const_i32(r / 4);
554
555 gen_helper_window_check(cpu_env, pc, w);
556 dc->base.is_jmp = DISAS_NORETURN;
557 return false;
558 }
559 return true;
560 }
561
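/*
 * Extract a 16-bit MAC16 multiplier operand: the high or low half of
 * v, zero-extended for UMUL and sign-extended otherwise.
 */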
562 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
563 {
564 TCGv_i32 m = tcg_temp_new_i32();
565
566 if (hi) {
567 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
568 } else {
569 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
570 }
571 return m;
572 }
573
574 static void gen_zero_check(DisasContext *dc, const OpcodeArg arg[])
575 {
576 TCGLabel *label = gen_new_label();
577
578 tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0, label);
579 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
580 gen_set_label(label);
581 }
582
583 static inline unsigned xtensa_op0_insn_len(DisasContext *dc, uint8_t op0)
584 {
585 return xtensa_isa_length_from_chars(dc->config->isa, &op0);
586 }
587
588 static int gen_postprocess(DisasContext *dc, int slot)
589 {
590 uint32_t op_flags = dc->op_flags;
591
592 #ifndef CONFIG_USER_ONLY
593 if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
594 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
595 gen_io_start();
596 }
597 gen_helper_check_interrupts(cpu_env);
598 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
599 gen_io_end();
600 }
601 }
602 #endif
603 if (op_flags & XTENSA_OP_SYNC_REGISTER_WINDOW) {
604 gen_helper_sync_windowbase(cpu_env);
605 }
606 if (op_flags & XTENSA_OP_EXIT_TB_M1) {
607 slot = -1;
608 }
609 return slot;
610 }
611
612 struct opcode_arg_copy {
613 uint32_t resource;
614 void *temp;
615 OpcodeArg *arg;
616 };
617
618 struct opcode_arg_info {
619 uint32_t resource;
620 int index;
621 };
622
623 struct slot_prop {
624 XtensaOpcodeOps *ops;
625 OpcodeArg arg[MAX_OPCODE_ARGS];
626 struct opcode_arg_info in[MAX_OPCODE_ARGS];
627 struct opcode_arg_info out[MAX_OPCODE_ARGS];
628 unsigned n_in;
629 unsigned n_out;
630 uint32_t op_flags;
631 };
632
633 enum resource_type {
634 RES_REGFILE,
635 RES_STATE,
636 RES_MAX,
637 };
638
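/*
 * Resources are packed into a single uint32_t so they can be compared
 * and sorted directly: bits [31:24] hold the resource type, bits
 * [23:16] the group (e.g. the register file), bits [15:0] the index
 * within the group.
 */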
639 static uint32_t encode_resource(enum resource_type r, unsigned g, unsigned n)
640 {
641 assert(r < RES_MAX && g < 256 && n < 65536);
642 return (r << 24) | (g << 16) | n;
643 }
644
645 static enum resource_type get_resource_type(uint32_t resource)
646 {
647 return resource >> 24;
648 }
649
650 /*
651 * a depends on b if b must be executed before a,
652 * because a's side effects will destroy b's inputs.
653 */
654 static bool op_depends_on(const struct slot_prop *a,
655 const struct slot_prop *b)
656 {
657 unsigned i = 0;
658 unsigned j = 0;
659
660 if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
661 return true;
662 }
663 if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
664 (b->op_flags & XTENSA_OP_LOAD_STORE)) {
665 return true;
666 }
667 while (i < a->n_out && j < b->n_in) {
668 if (a->out[i].resource < b->in[j].resource) {
669 ++i;
670 } else if (a->out[i].resource > b->in[j].resource) {
671 ++j;
672 } else {
673 return true;
674 }
675 }
676 return false;
677 }
678
679 /*
680 * Try to break a dependency on b, append temporary register copy records
681 * to the end of copy and update n_copy in case of success.
682 * This is not always possible: e.g. control flow must always be the last,
683 * load/store must be first and state dependencies are not supported yet.
684 */
685 static bool break_dependency(struct slot_prop *a,
686 struct slot_prop *b,
687 struct opcode_arg_copy *copy,
688 unsigned *n_copy)
689 {
690 unsigned i = 0;
691 unsigned j = 0;
692 unsigned n = *n_copy;
693 bool rv = false;
694
695 if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
696 return false;
697 }
698 if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
699 (b->op_flags & XTENSA_OP_LOAD_STORE)) {
700 return false;
701 }
702 while (i < a->n_out && j < b->n_in) {
703 if (a->out[i].resource < b->in[j].resource) {
704 ++i;
705 } else if (a->out[i].resource > b->in[j].resource) {
706 ++j;
707 } else {
708 int index = b->in[j].index;
709
710 if (get_resource_type(a->out[i].resource) != RES_REGFILE ||
711 index < 0) {
712 return false;
713 }
714 copy[n].resource = b->in[j].resource;
715 copy[n].arg = b->arg + index;
716 ++n;
717 ++j;
718 rv = true;
719 }
720 }
721 *n_copy = n;
722 return rv;
723 }
724
725 /*
726 * Calculate evaluation order for slot opcodes.
727 * Build opcode order graph and output its nodes in topological sort order.
728 * An edge a -> b in the graph means that opcode a must be followed by
729 * opcode b.
730 */
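/*
 * This is Kahn's algorithm: slots with no unsatisfied dependencies are
 * emitted in turn; when only a cycle remains, break_dependency() tries
 * to cut an edge by copying an input operand into a temporary.
 */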
731 static bool tsort(struct slot_prop *slot,
732 struct slot_prop *sorted[],
733 unsigned n,
734 struct opcode_arg_copy *copy,
735 unsigned *n_copy)
736 {
737 struct tsnode {
738 unsigned n_in_edge;
739 unsigned n_out_edge;
740 unsigned out_edge[MAX_INSN_SLOTS];
741 } node[MAX_INSN_SLOTS];
742
743 unsigned in[MAX_INSN_SLOTS];
744 unsigned i, j;
745 unsigned n_in = 0;
746 unsigned n_out = 0;
747 unsigned n_edge = 0;
748 unsigned in_idx = 0;
749 unsigned node_idx = 0;
750
751 for (i = 0; i < n; ++i) {
752 node[i].n_in_edge = 0;
753 node[i].n_out_edge = 0;
754 }
755
756 for (i = 0; i < n; ++i) {
757 unsigned n_out_edge = 0;
758
759 for (j = 0; j < n; ++j) {
760 if (i != j && op_depends_on(slot + j, slot + i)) {
761 node[i].out_edge[n_out_edge] = j;
762 ++node[j].n_in_edge;
763 ++n_out_edge;
764 ++n_edge;
765 }
766 }
767 node[i].n_out_edge = n_out_edge;
768 }
769
770 for (i = 0; i < n; ++i) {
771 if (!node[i].n_in_edge) {
772 in[n_in] = i;
773 ++n_in;
774 }
775 }
776
777 again:
778 for (; in_idx < n_in; ++in_idx) {
779 i = in[in_idx];
780 sorted[n_out] = slot + i;
781 ++n_out;
782 for (j = 0; j < node[i].n_out_edge; ++j) {
783 --n_edge;
784 if (--node[node[i].out_edge[j]].n_in_edge == 0) {
785 in[n_in] = node[i].out_edge[j];
786 ++n_in;
787 }
788 }
789 }
790 if (n_edge) {
791 for (; node_idx < n; ++node_idx) {
792 struct tsnode *cnode = node + node_idx;
793
794 if (cnode->n_in_edge) {
795 for (j = 0; j < cnode->n_out_edge; ++j) {
796 unsigned k = cnode->out_edge[j];
797
798 if (break_dependency(slot + k, slot + node_idx,
799 copy, n_copy) &&
800 --node[k].n_in_edge == 0) {
801 in[n_in] = k;
802 ++n_in;
803 --n_edge;
804 cnode->out_edge[j] =
805 cnode->out_edge[cnode->n_out_edge - 1];
806 --cnode->n_out_edge;
807 goto again;
808 }
809 }
810 }
811 }
812 }
813 return n_edge == 0;
814 }
815
816 static void opcode_add_resource(struct slot_prop *op,
817 uint32_t resource, char direction,
818 int index)
819 {
820 switch (direction) {
821 case 'm':
822 case 'i':
823 assert(op->n_in < ARRAY_SIZE(op->in));
824 op->in[op->n_in].resource = resource;
825 op->in[op->n_in].index = index;
826 ++op->n_in;
827 /* fall through */
828 case 'o':
829 if (direction == 'm' || direction == 'o') {
830 assert(op->n_out < ARRAY_SIZE(op->out));
831 op->out[op->n_out].resource = resource;
832 op->out[op->n_out].index = index;
833 ++op->n_out;
834 }
835 break;
836 default:
837 g_assert_not_reached();
838 }
839 }
840
841 static int resource_compare(const void *a, const void *b)
842 {
843 const struct opcode_arg_info *pa = a;
844 const struct opcode_arg_info *pb = b;
845
846 return pa->resource < pb->resource ?
847 -1 : (pa->resource > pb->resource ? 1 : 0);
848 }
849
850 static int arg_copy_compare(const void *a, const void *b)
851 {
852 const struct opcode_arg_copy *pa = a;
853 const struct opcode_arg_copy *pb = b;
854
855 return pa->resource < pb->resource ?
856 -1 : (pa->resource > pb->resource ? 1 : 0);
857 }
858
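/*
 * Translate one instruction bundle: determine its length and format,
 * decode the opcode and operands of every slot, collect per-slot
 * resource usage, order the slots so that inter-slot dependencies are
 * honoured, perform the common privilege/window/coprocessor checks and
 * finally emit the translation of each slot.
 */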
859 static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
860 {
861 xtensa_isa isa = dc->config->isa;
862 unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)};
863 unsigned len = xtensa_op0_insn_len(dc, b[0]);
864 xtensa_format fmt;
865 int slot, slots;
866 unsigned i;
867 uint32_t op_flags = 0;
868 struct slot_prop slot_prop[MAX_INSN_SLOTS];
869 struct slot_prop *ordered[MAX_INSN_SLOTS];
870 struct opcode_arg_copy arg_copy[MAX_INSN_SLOTS * MAX_OPCODE_ARGS];
871 unsigned n_arg_copy = 0;
872 uint32_t debug_cause = 0;
873 uint32_t windowed_register = 0;
874 uint32_t coprocessor = 0;
875
876 if (len == XTENSA_UNDEFINED) {
877 qemu_log_mask(LOG_GUEST_ERROR,
878 "unknown instruction length (pc = %08x)\n",
879 dc->pc);
880 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
881 return;
882 }
883
884 dc->base.pc_next = dc->pc + len;
885 for (i = 1; i < len; ++i) {
886 b[i] = translator_ldub(env, dc->pc + i);
887 }
888 xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
889 fmt = xtensa_format_decode(isa, dc->insnbuf);
890 if (fmt == XTENSA_UNDEFINED) {
891 qemu_log_mask(LOG_GUEST_ERROR,
892 "unrecognized instruction format (pc = %08x)\n",
893 dc->pc);
894 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
895 return;
896 }
897 slots = xtensa_format_num_slots(isa, fmt);
898 for (slot = 0; slot < slots; ++slot) {
899 xtensa_opcode opc;
900 int opnd, vopnd, opnds;
901 OpcodeArg *arg = slot_prop[slot].arg;
902 XtensaOpcodeOps *ops;
903
904 xtensa_format_get_slot(isa, fmt, slot, dc->insnbuf, dc->slotbuf);
905 opc = xtensa_opcode_decode(isa, fmt, slot, dc->slotbuf);
906 if (opc == XTENSA_UNDEFINED) {
907 qemu_log_mask(LOG_GUEST_ERROR,
908 "unrecognized opcode in slot %d (pc = %08x)\n",
909 slot, dc->pc);
910 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
911 return;
912 }
913 opnds = xtensa_opcode_num_operands(isa, opc);
914
915 for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
916 void **register_file = NULL;
917
918 if (xtensa_operand_is_register(isa, opc, opnd)) {
919 xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);
920
921 register_file = dc->config->regfile[rf];
922
923 if (rf == dc->config->a_regfile) {
924 uint32_t v;
925
926 xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
927 dc->slotbuf, &v);
928 xtensa_operand_decode(isa, opc, opnd, &v);
929 windowed_register |= 1u << v;
930 }
931 }
932 if (xtensa_operand_is_visible(isa, opc, opnd)) {
933 uint32_t v;
934
935 xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
936 dc->slotbuf, &v);
937 xtensa_operand_decode(isa, opc, opnd, &v);
938 arg[vopnd].raw_imm = v;
939 if (xtensa_operand_is_PCrelative(isa, opc, opnd)) {
940 xtensa_operand_undo_reloc(isa, opc, opnd, &v, dc->pc);
941 }
942 arg[vopnd].imm = v;
943 if (register_file) {
944 arg[vopnd].in = register_file[v];
945 arg[vopnd].out = register_file[v];
946 }
947 ++vopnd;
948 }
949 }
950 ops = dc->config->opcode_ops[opc];
951 slot_prop[slot].ops = ops;
952
953 if (ops) {
954 op_flags |= ops->op_flags;
955 } else {
956 qemu_log_mask(LOG_UNIMP,
957 "unimplemented opcode '%s' in slot %d (pc = %08x)\n",
958 xtensa_opcode_name(isa, opc), slot, dc->pc);
959 op_flags |= XTENSA_OP_ILL;
960 }
961 if ((op_flags & XTENSA_OP_ILL) ||
962 (ops && ops->test_ill && ops->test_ill(dc, arg, ops->par))) {
963 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
964 return;
965 }
966 if (ops->op_flags & XTENSA_OP_DEBUG_BREAK) {
967 debug_cause |= ops->par[0];
968 }
969 if (ops->test_overflow) {
970 windowed_register |= ops->test_overflow(dc, arg, ops->par);
971 }
972 coprocessor |= ops->coprocessor;
973
974 if (slots > 1) {
975 slot_prop[slot].n_in = 0;
976 slot_prop[slot].n_out = 0;
977 slot_prop[slot].op_flags = ops->op_flags & XTENSA_OP_LOAD_STORE;
978
979 opnds = xtensa_opcode_num_operands(isa, opc);
980
981 for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
982 bool visible = xtensa_operand_is_visible(isa, opc, opnd);
983
984 if (xtensa_operand_is_register(isa, opc, opnd)) {
985 xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);
986 uint32_t v = 0;
987
988 xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
989 dc->slotbuf, &v);
990 xtensa_operand_decode(isa, opc, opnd, &v);
991 opcode_add_resource(slot_prop + slot,
992 encode_resource(RES_REGFILE, rf, v),
993 xtensa_operand_inout(isa, opc, opnd),
994 visible ? vopnd : -1);
995 }
996 if (visible) {
997 ++vopnd;
998 }
999 }
1000
1001 opnds = xtensa_opcode_num_stateOperands(isa, opc);
1002
1003 for (opnd = 0; opnd < opnds; ++opnd) {
1004 xtensa_state state = xtensa_stateOperand_state(isa, opc, opnd);
1005
1006 opcode_add_resource(slot_prop + slot,
1007 encode_resource(RES_STATE, 0, state),
1008 xtensa_stateOperand_inout(isa, opc, opnd),
1009 -1);
1010 }
1011 if (xtensa_opcode_is_branch(isa, opc) ||
1012 xtensa_opcode_is_jump(isa, opc) ||
1013 xtensa_opcode_is_loop(isa, opc) ||
1014 xtensa_opcode_is_call(isa, opc)) {
1015 slot_prop[slot].op_flags |= XTENSA_OP_CONTROL_FLOW;
1016 }
1017
1018 qsort(slot_prop[slot].in, slot_prop[slot].n_in,
1019 sizeof(slot_prop[slot].in[0]), resource_compare);
1020 qsort(slot_prop[slot].out, slot_prop[slot].n_out,
1021 sizeof(slot_prop[slot].out[0]), resource_compare);
1022 }
1023 }
1024
1025 if (slots > 1) {
1026 if (!tsort(slot_prop, ordered, slots, arg_copy, &n_arg_copy)) {
1027 qemu_log_mask(LOG_UNIMP,
1028 "Circular resource dependencies (pc = %08x)\n",
1029 dc->pc);
1030 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1031 return;
1032 }
1033 } else {
1034 ordered[0] = slot_prop + 0;
1035 }
1036
1037 if ((op_flags & XTENSA_OP_PRIVILEGED) &&
1038 !gen_check_privilege(dc)) {
1039 return;
1040 }
1041
1042 if (op_flags & XTENSA_OP_SYSCALL) {
1043 gen_exception_cause(dc, SYSCALL_CAUSE);
1044 return;
1045 }
1046
1047 if ((op_flags & XTENSA_OP_DEBUG_BREAK) && dc->debug) {
1048 gen_debug_exception(dc, debug_cause);
1049 return;
1050 }
1051
1052 if (windowed_register && !gen_window_check(dc, windowed_register)) {
1053 return;
1054 }
1055
1056 if (op_flags & XTENSA_OP_UNDERFLOW) {
1057 TCGv_i32 tmp = tcg_const_i32(dc->pc);
1058
1059 gen_helper_test_underflow_retw(cpu_env, tmp);
1060 tcg_temp_free(tmp);
1061 }
1062
1063 if (op_flags & XTENSA_OP_ALLOCA) {
1064 TCGv_i32 tmp = tcg_const_i32(dc->pc);
1065
1066 gen_helper_movsp(cpu_env, tmp);
1067 tcg_temp_free(tmp);
1068 }
1069
1070 if (coprocessor && !gen_check_cpenable(dc, coprocessor)) {
1071 return;
1072 }
1073
1074 if (n_arg_copy) {
1075 uint32_t resource;
1076 void *temp;
1077 unsigned j;
1078
1079 qsort(arg_copy, n_arg_copy, sizeof(*arg_copy), arg_copy_compare);
1080 for (i = j = 0; i < n_arg_copy; ++i) {
1081 if (i == 0 || arg_copy[i].resource != resource) {
1082 resource = arg_copy[i].resource;
1083 temp = tcg_temp_local_new();
1084 tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
1085 arg_copy[i].temp = temp;
1086
1087 if (i != j) {
1088 arg_copy[j] = arg_copy[i];
1089 }
1090 ++j;
1091 }
1092 arg_copy[i].arg->in = temp;
1093 }
1094 n_arg_copy = j;
1095 }
1096
1097 if (op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
1098 for (slot = 0; slot < slots; ++slot) {
1099 if (slot_prop[slot].ops->op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
1100 gen_zero_check(dc, slot_prop[slot].arg);
1101 }
1102 }
1103 }
1104
1105 dc->op_flags = op_flags;
1106
1107 for (slot = 0; slot < slots; ++slot) {
1108 struct slot_prop *pslot = ordered[slot];
1109 XtensaOpcodeOps *ops = pslot->ops;
1110
1111 ops->translate(dc, pslot->arg, ops->par);
1112 }
1113
1114 for (i = 0; i < n_arg_copy; ++i) {
1115 tcg_temp_free(arg_copy[i].temp);
1116 }
1117
1118 if (dc->base.is_jmp == DISAS_NEXT) {
1119 gen_postprocess(dc, 0);
1120 dc->op_flags = 0;
1121 if (op_flags & XTENSA_OP_EXIT_TB_M1) {
1122 /* Change in mmu index, memory mapping or tb->flags; exit tb */
1123 gen_jumpi_check_loop_end(dc, -1);
1124 } else if (op_flags & XTENSA_OP_EXIT_TB_0) {
1125 gen_jumpi_check_loop_end(dc, 0);
1126 } else {
1127 gen_check_loop_end(dc, 0);
1128 }
1129 }
1130 dc->pc = dc->base.pc_next;
1131 }
1132
1133 static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
1134 {
1135 uint8_t b0 = cpu_ldub_code(env, dc->pc);
1136 return xtensa_op0_insn_len(dc, b0);
1137 }
1138
1139 static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
1140 {
1141 unsigned i;
1142
1143 for (i = 0; i < dc->config->nibreak; ++i) {
1144 if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
1145 env->sregs[IBREAKA + i] == dc->pc) {
1146 gen_debug_exception(dc, DEBUGCAUSE_IB);
1147 break;
1148 }
1149 }
1150 }
1151
1152 static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
1153 CPUState *cpu)
1154 {
1155 DisasContext *dc = container_of(dcbase, DisasContext, base);
1156 CPUXtensaState *env = cpu->env_ptr;
1157 uint32_t tb_flags = dc->base.tb->flags;
1158
1159 dc->config = env->config;
1160 dc->pc = dc->base.pc_first;
1161 dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK;
1162 dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring;
1163 dc->lbeg_off = (dc->base.tb->cs_base & XTENSA_CSBASE_LBEG_OFF_MASK) >>
1164 XTENSA_CSBASE_LBEG_OFF_SHIFT;
1165 dc->lend = (dc->base.tb->cs_base & XTENSA_CSBASE_LEND_MASK) +
1166 (dc->base.pc_first & TARGET_PAGE_MASK);
1167 dc->debug = tb_flags & XTENSA_TBFLAG_DEBUG;
1168 dc->icount = tb_flags & XTENSA_TBFLAG_ICOUNT;
1169 dc->cpenable = (tb_flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
1170 XTENSA_TBFLAG_CPENABLE_SHIFT;
1171 dc->window = ((tb_flags & XTENSA_TBFLAG_WINDOW_MASK) >>
1172 XTENSA_TBFLAG_WINDOW_SHIFT);
1173 dc->cwoe = tb_flags & XTENSA_TBFLAG_CWOE;
1174 dc->callinc = ((tb_flags & XTENSA_TBFLAG_CALLINC_MASK) >>
1175 XTENSA_TBFLAG_CALLINC_SHIFT);
1176
1177 if (dc->config->isa) {
1178 dc->insnbuf = xtensa_insnbuf_alloc(dc->config->isa);
1179 dc->slotbuf = xtensa_insnbuf_alloc(dc->config->isa);
1180 }
1181 init_sar_tracker(dc);
1182 }
1183
1184 static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
1185 {
1186 DisasContext *dc = container_of(dcbase, DisasContext, base);
1187
1188 if (dc->icount) {
1189 dc->next_icount = tcg_temp_local_new_i32();
1190 }
1191 }
1192
1193 static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
1194 {
1195 tcg_gen_insn_start(dcbase->pc_next);
1196 }
1197
1198 static bool xtensa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
1199 const CPUBreakpoint *bp)
1200 {
1201 DisasContext *dc = container_of(dcbase, DisasContext, base);
1202
1203 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1204 gen_exception(dc, EXCP_DEBUG);
1205 dc->base.is_jmp = DISAS_NORETURN;
1206 /* The address covered by the breakpoint must be included in
1207        [tb->pc, tb->pc + tb->size) in order for it to be
1208 properly cleared -- thus we increment the PC here so that
1209 the logic setting tb->size below does the right thing. */
1210 dc->base.pc_next += 2;
1211 return true;
1212 }
1213
1214 static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
1215 {
1216 DisasContext *dc = container_of(dcbase, DisasContext, base);
1217 CPUXtensaState *env = cpu->env_ptr;
1218 target_ulong page_start;
1219
1220 /* These two conditions only apply to the first insn in the TB,
1221        but this is the first TranslatorOps hook that allows exiting. */
1222 if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
1223 && (dc->base.tb->flags & XTENSA_TBFLAG_YIELD)) {
1224 gen_exception(dc, EXCP_YIELD);
1225 dc->base.is_jmp = DISAS_NORETURN;
1226 return;
1227 }
1228 if (dc->base.tb->flags & XTENSA_TBFLAG_EXCEPTION) {
1229 gen_exception(dc, EXCP_DEBUG);
1230 dc->base.is_jmp = DISAS_NORETURN;
1231 return;
1232 }
1233
1234 if (dc->icount) {
1235 TCGLabel *label = gen_new_label();
1236
1237 tcg_gen_addi_i32(dc->next_icount, cpu_SR[ICOUNT], 1);
1238 tcg_gen_brcondi_i32(TCG_COND_NE, dc->next_icount, 0, label);
1239 tcg_gen_mov_i32(dc->next_icount, cpu_SR[ICOUNT]);
1240 if (dc->debug) {
1241 gen_debug_exception(dc, DEBUGCAUSE_IC);
1242 }
1243 gen_set_label(label);
1244 }
1245
1246 if (dc->debug) {
1247 gen_ibreak_check(env, dc);
1248 }
1249
1250 disas_xtensa_insn(env, dc);
1251
1252 if (dc->icount) {
1253 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
1254 }
1255
1256 /* End the TB if the next insn will cross into the next page. */
1257 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1258 if (dc->base.is_jmp == DISAS_NEXT &&
1259 (dc->pc - page_start >= TARGET_PAGE_SIZE ||
1260 dc->pc - page_start + xtensa_insn_len(env, dc) > TARGET_PAGE_SIZE)) {
1261 dc->base.is_jmp = DISAS_TOO_MANY;
1262 }
1263 }
1264
1265 static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1266 {
1267 DisasContext *dc = container_of(dcbase, DisasContext, base);
1268
1269 reset_sar_tracker(dc);
1270 if (dc->config->isa) {
1271 xtensa_insnbuf_free(dc->config->isa, dc->insnbuf);
1272 xtensa_insnbuf_free(dc->config->isa, dc->slotbuf);
1273 }
1274 if (dc->icount) {
1275 tcg_temp_free(dc->next_icount);
1276 }
1277
1278 switch (dc->base.is_jmp) {
1279 case DISAS_NORETURN:
1280 break;
1281 case DISAS_TOO_MANY:
1282 if (dc->base.singlestep_enabled) {
1283 tcg_gen_movi_i32(cpu_pc, dc->pc);
1284 gen_exception(dc, EXCP_DEBUG);
1285 } else {
1286 gen_jumpi(dc, dc->pc, 0);
1287 }
1288 break;
1289 default:
1290 g_assert_not_reached();
1291 }
1292 }
1293
1294 static void xtensa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
1295 {
1296 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
1297 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
1298 }
1299
1300 static const TranslatorOps xtensa_translator_ops = {
1301 .init_disas_context = xtensa_tr_init_disas_context,
1302 .tb_start = xtensa_tr_tb_start,
1303 .insn_start = xtensa_tr_insn_start,
1304 .breakpoint_check = xtensa_tr_breakpoint_check,
1305 .translate_insn = xtensa_tr_translate_insn,
1306 .tb_stop = xtensa_tr_tb_stop,
1307 .disas_log = xtensa_tr_disas_log,
1308 };
1309
1310 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1311 {
1312 DisasContext dc = {};
1313 translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
1314 }
1315
1316 void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1317 {
1318 XtensaCPU *cpu = XTENSA_CPU(cs);
1319 CPUXtensaState *env = &cpu->env;
1320 xtensa_isa isa = env->config->isa;
1321 int i, j;
1322
1323 qemu_fprintf(f, "PC=%08x\n\n", env->pc);
1324
1325 for (i = j = 0; i < xtensa_isa_num_sysregs(isa); ++i) {
1326 const uint32_t *reg =
1327 xtensa_sysreg_is_user(isa, i) ? env->uregs : env->sregs;
1328 int regno = xtensa_sysreg_number(isa, i);
1329
1330 if (regno >= 0) {
1331 qemu_fprintf(f, "%12s=%08x%c",
1332 xtensa_sysreg_name(isa, i),
1333 reg[regno],
1334 (j++ % 4) == 3 ? '\n' : ' ');
1335 }
1336 }
1337
1338 qemu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
1339
1340 for (i = 0; i < 16; ++i) {
1341 qemu_fprintf(f, " A%02d=%08x%c",
1342 i, env->regs[i], (i % 4) == 3 ? '\n' : ' ');
1343 }
1344
1345 xtensa_sync_phys_from_window(env);
1346 qemu_fprintf(f, "\n");
1347
1348 for (i = 0; i < env->config->nareg; ++i) {
1349 qemu_fprintf(f, "AR%02d=%08x ", i, env->phys_regs[i]);
1350 if (i % 4 == 3) {
1351 bool ws = (env->sregs[WINDOW_START] & (1 << (i / 4))) != 0;
1352 bool cw = env->sregs[WINDOW_BASE] == i / 4;
1353
1354 qemu_fprintf(f, "%c%c\n", ws ? '<' : ' ', cw ? '=' : ' ');
1355 }
1356 }
1357
1358 if ((flags & CPU_DUMP_FPU) &&
1359 xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
1360 qemu_fprintf(f, "\n");
1361
1362 for (i = 0; i < 16; ++i) {
1363 qemu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
1364 float32_val(env->fregs[i].f32[FP_F32_LOW]),
1365 *(float *)(env->fregs[i].f32 + FP_F32_LOW),
1366 (i % 2) == 1 ? '\n' : ' ');
1367 }
1368 }
1369 }
1370
1371 void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
1372 target_ulong *data)
1373 {
1374 env->pc = data[0];
1375 }
1376
1377 static void translate_abs(DisasContext *dc, const OpcodeArg arg[],
1378 const uint32_t par[])
1379 {
1380 tcg_gen_abs_i32(arg[0].out, arg[1].in);
1381 }
1382
1383 static void translate_add(DisasContext *dc, const OpcodeArg arg[],
1384 const uint32_t par[])
1385 {
1386 tcg_gen_add_i32(arg[0].out, arg[1].in, arg[2].in);
1387 }
1388
1389 static void translate_addi(DisasContext *dc, const OpcodeArg arg[],
1390 const uint32_t par[])
1391 {
1392 tcg_gen_addi_i32(arg[0].out, arg[1].in, arg[2].imm);
1393 }
1394
1395 static void translate_addx(DisasContext *dc, const OpcodeArg arg[],
1396 const uint32_t par[])
1397 {
1398 TCGv_i32 tmp = tcg_temp_new_i32();
1399 tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
1400 tcg_gen_add_i32(arg[0].out, tmp, arg[2].in);
1401 tcg_temp_free(tmp);
1402 }
1403
1404 static void translate_all(DisasContext *dc, const OpcodeArg arg[],
1405 const uint32_t par[])
1406 {
1407 uint32_t shift = par[1];
1408 TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm);
1409 TCGv_i32 tmp = tcg_temp_new_i32();
1410
1411 tcg_gen_and_i32(tmp, arg[1].in, mask);
1412 if (par[0]) {
1413 tcg_gen_addi_i32(tmp, tmp, 1 << arg[1].imm);
1414 } else {
1415 tcg_gen_add_i32(tmp, tmp, mask);
1416 }
1417 tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift);
1418 tcg_gen_deposit_i32(arg[0].out, arg[0].out,
1419 tmp, arg[0].imm, 1);
1420 tcg_temp_free(mask);
1421 tcg_temp_free(tmp);
1422 }
1423
1424 static void translate_and(DisasContext *dc, const OpcodeArg arg[],
1425 const uint32_t par[])
1426 {
1427 tcg_gen_and_i32(arg[0].out, arg[1].in, arg[2].in);
1428 }
1429
1430 static void translate_ball(DisasContext *dc, const OpcodeArg arg[],
1431 const uint32_t par[])
1432 {
1433 TCGv_i32 tmp = tcg_temp_new_i32();
1434 tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
1435 gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm);
1436 tcg_temp_free(tmp);
1437 }
1438
1439 static void translate_bany(DisasContext *dc, const OpcodeArg arg[],
1440 const uint32_t par[])
1441 {
1442 TCGv_i32 tmp = tcg_temp_new_i32();
1443 tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
1444 gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
1445 tcg_temp_free(tmp);
1446 }
1447
1448 static void translate_b(DisasContext *dc, const OpcodeArg arg[],
1449 const uint32_t par[])
1450 {
1451 gen_brcond(dc, par[0], arg[0].in, arg[1].in, arg[2].imm);
1452 }
1453
1454 static void translate_bb(DisasContext *dc, const OpcodeArg arg[],
1455 const uint32_t par[])
1456 {
1457 #ifdef TARGET_WORDS_BIGENDIAN
1458 TCGv_i32 bit = tcg_const_i32(0x80000000u);
1459 #else
1460 TCGv_i32 bit = tcg_const_i32(0x00000001u);
1461 #endif
1462 TCGv_i32 tmp = tcg_temp_new_i32();
1463 tcg_gen_andi_i32(tmp, arg[1].in, 0x1f);
1464 #ifdef TARGET_WORDS_BIGENDIAN
1465 tcg_gen_shr_i32(bit, bit, tmp);
1466 #else
1467 tcg_gen_shl_i32(bit, bit, tmp);
1468 #endif
1469 tcg_gen_and_i32(tmp, arg[0].in, bit);
1470 gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
1471 tcg_temp_free(tmp);
1472 tcg_temp_free(bit);
1473 }
1474
1475 static void translate_bbi(DisasContext *dc, const OpcodeArg arg[],
1476 const uint32_t par[])
1477 {
1478 TCGv_i32 tmp = tcg_temp_new_i32();
1479 #ifdef TARGET_WORDS_BIGENDIAN
1480 tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
1481 #else
1482 tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
1483 #endif
1484 gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
1485 tcg_temp_free(tmp);
1486 }
1487
1488 static void translate_bi(DisasContext *dc, const OpcodeArg arg[],
1489 const uint32_t par[])
1490 {
1491 gen_brcondi(dc, par[0], arg[0].in, arg[1].imm, arg[2].imm);
1492 }
1493
1494 static void translate_bz(DisasContext *dc, const OpcodeArg arg[],
1495 const uint32_t par[])
1496 {
1497 gen_brcondi(dc, par[0], arg[0].in, 0, arg[1].imm);
1498 }
1499
1500 enum {
1501 BOOLEAN_AND,
1502 BOOLEAN_ANDC,
1503 BOOLEAN_OR,
1504 BOOLEAN_ORC,
1505 BOOLEAN_XOR,
1506 };
1507
1508 static void translate_boolean(DisasContext *dc, const OpcodeArg arg[],
1509 const uint32_t par[])
1510 {
1511 static void (* const op[])(TCGv_i32, TCGv_i32, TCGv_i32) = {
1512 [BOOLEAN_AND] = tcg_gen_and_i32,
1513 [BOOLEAN_ANDC] = tcg_gen_andc_i32,
1514 [BOOLEAN_OR] = tcg_gen_or_i32,
1515 [BOOLEAN_ORC] = tcg_gen_orc_i32,
1516 [BOOLEAN_XOR] = tcg_gen_xor_i32,
1517 };
1518
1519 TCGv_i32 tmp1 = tcg_temp_new_i32();
1520 TCGv_i32 tmp2 = tcg_temp_new_i32();
1521
1522 tcg_gen_shri_i32(tmp1, arg[1].in, arg[1].imm);
1523 tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm);
1524 op[par[0]](tmp1, tmp1, tmp2);
1525 tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1);
1526 tcg_temp_free(tmp1);
1527 tcg_temp_free(tmp2);
1528 }
1529
1530 static void translate_bp(DisasContext *dc, const OpcodeArg arg[],
1531 const uint32_t par[])
1532 {
1533 TCGv_i32 tmp = tcg_temp_new_i32();
1534
1535 tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm);
1536 gen_brcondi(dc, par[0], tmp, 0, arg[1].imm);
1537 tcg_temp_free(tmp);
1538 }
1539
1540 static void translate_call0(DisasContext *dc, const OpcodeArg arg[],
1541 const uint32_t par[])
1542 {
1543 tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
1544 gen_jumpi(dc, arg[0].imm, 0);
1545 }
1546
1547 static void translate_callw(DisasContext *dc, const OpcodeArg arg[],
1548 const uint32_t par[])
1549 {
1550 TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
1551 gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0));
1552 tcg_temp_free(tmp);
1553 }
1554
1555 static void translate_callx0(DisasContext *dc, const OpcodeArg arg[],
1556 const uint32_t par[])
1557 {
1558 TCGv_i32 tmp = tcg_temp_new_i32();
1559 tcg_gen_mov_i32(tmp, arg[0].in);
1560 tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
1561 gen_jump(dc, tmp);
1562 tcg_temp_free(tmp);
1563 }
1564
1565 static void translate_callxw(DisasContext *dc, const OpcodeArg arg[],
1566 const uint32_t par[])
1567 {
1568 TCGv_i32 tmp = tcg_temp_new_i32();
1569
1570 tcg_gen_mov_i32(tmp, arg[0].in);
1571 gen_callw_slot(dc, par[0], tmp, -1);
1572 tcg_temp_free(tmp);
1573 }
1574
1575 static void translate_clamps(DisasContext *dc, const OpcodeArg arg[],
1576 const uint32_t par[])
1577 {
1578 TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm);
1579 TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1);
1580
1581 tcg_gen_smax_i32(tmp1, tmp1, arg[1].in);
1582 tcg_gen_smin_i32(arg[0].out, tmp1, tmp2);
1583 tcg_temp_free(tmp1);
1584 tcg_temp_free(tmp2);
1585 }
1586
1587 static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[],
1588 const uint32_t par[])
1589 {
1590 /* TODO: GPIO32 may be a part of coprocessor */
1591 tcg_gen_andi_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], ~(1u << arg[0].imm));
1592 }
1593
1594 static void translate_clrex(DisasContext *dc, const OpcodeArg arg[],
1595 const uint32_t par[])
1596 {
1597 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
1598 }
1599
1600 static void translate_const16(DisasContext *dc, const OpcodeArg arg[],
1601 const uint32_t par[])
1602 {
1603 TCGv_i32 c = tcg_const_i32(arg[1].imm);
1604
1605 tcg_gen_deposit_i32(arg[0].out, c, arg[0].in, 16, 16);
1606 tcg_temp_free(c);
1607 }
1608
1609 static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
1610 const uint32_t par[])
1611 {
1612 TCGv_i32 addr = tcg_temp_new_i32();
1613 TCGv_i32 res = tcg_temp_new_i32();
1614
1615 tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
1616 tcg_gen_qemu_ld8u(res, addr, dc->cring);
1617 tcg_temp_free(addr);
1618 tcg_temp_free(res);
1619 }
1620
1621 static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
1622 const uint32_t par[])
1623 {
1624 tcg_gen_deposit_i32(arg[1].out, arg[1].in, arg[0].in,
1625 arg[2].imm, arg[3].imm);
1626 }
1627
1628 static void translate_diwbuip(DisasContext *dc, const OpcodeArg arg[],
1629 const uint32_t par[])
1630 {
1631 tcg_gen_addi_i32(arg[0].out, arg[0].in, dc->config->dcache_line_bytes);
1632 }
1633
1634 static bool test_ill_entry(DisasContext *dc, const OpcodeArg arg[],
1635 const uint32_t par[])
1636 {
1637 if (arg[0].imm > 3 || !dc->cwoe) {
1638 qemu_log_mask(LOG_GUEST_ERROR,
1639                       "Illegal entry instruction (pc = %08x)\n", dc->pc);
1640 return true;
1641 } else {
1642 return false;
1643 }
1644 }
1645
1646 static uint32_t test_overflow_entry(DisasContext *dc, const OpcodeArg arg[],
1647 const uint32_t par[])
1648 {
1649 return 1 << (dc->callinc * 4);
1650 }
1651
1652 static void translate_entry(DisasContext *dc, const OpcodeArg arg[],
1653 const uint32_t par[])
1654 {
1655 TCGv_i32 pc = tcg_const_i32(dc->pc);
1656 TCGv_i32 s = tcg_const_i32(arg[0].imm);
1657 TCGv_i32 imm = tcg_const_i32(arg[1].imm);
1658 gen_helper_entry(cpu_env, pc, s, imm);
1659 tcg_temp_free(imm);
1660 tcg_temp_free(s);
1661 tcg_temp_free(pc);
1662 }
1663
1664 static void translate_extui(DisasContext *dc, const OpcodeArg arg[],
1665 const uint32_t par[])
1666 {
1667 int maskimm = (1 << arg[3].imm) - 1;
1668
1669 TCGv_i32 tmp = tcg_temp_new_i32();
1670 tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm);
1671 tcg_gen_andi_i32(arg[0].out, tmp, maskimm);
1672 tcg_temp_free(tmp);
1673 }
1674
1675 static void translate_getex(DisasContext *dc, const OpcodeArg arg[],
1676 const uint32_t par[])
1677 {
1678 TCGv_i32 tmp = tcg_temp_new_i32();
1679
1680 tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1);
1681 tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1);
1682 tcg_gen_mov_i32(arg[0].out, tmp);
1683 tcg_temp_free(tmp);
1684 }
1685
1686 static void translate_icache(DisasContext *dc, const OpcodeArg arg[],
1687 const uint32_t par[])
1688 {
1689 #ifndef CONFIG_USER_ONLY
1690 TCGv_i32 addr = tcg_temp_new_i32();
1691
1692 tcg_gen_movi_i32(cpu_pc, dc->pc);
1693 tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
1694 gen_helper_itlb_hit_test(cpu_env, addr);
1695 tcg_temp_free(addr);
1696 #endif
1697 }
1698
1699 static void translate_itlb(DisasContext *dc, const OpcodeArg arg[],
1700 const uint32_t par[])
1701 {
1702 #ifndef CONFIG_USER_ONLY
1703 TCGv_i32 dtlb = tcg_const_i32(par[0]);
1704
1705 gen_helper_itlb(cpu_env, arg[0].in, dtlb);
1706 tcg_temp_free(dtlb);
1707 #endif
1708 }
1709
1710 static void translate_j(DisasContext *dc, const OpcodeArg arg[],
1711 const uint32_t par[])
1712 {
1713 gen_jumpi(dc, arg[0].imm, 0);
1714 }
1715
1716 static void translate_jx(DisasContext *dc, const OpcodeArg arg[],
1717 const uint32_t par[])
1718 {
1719 gen_jump(dc, arg[0].in);
1720 }
1721
1722 static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
1723 const uint32_t par[])
1724 {
1725 TCGv_i32 addr = tcg_temp_new_i32();
1726
1727 tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
1728 gen_load_store_alignment(dc, 2, addr, false);
1729 tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
1730 tcg_temp_free(addr);
1731 }
1732
1733 #ifdef CONFIG_USER_ONLY
1734 static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
1735 {
1736 }
1737 #else
1738 static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
1739 {
1740 if (!option_enabled(dc, XTENSA_OPTION_MPU)) {
1741 TCGv_i32 tpc = tcg_const_i32(dc->pc);
1742 TCGv_i32 write = tcg_const_i32(is_write);
1743
1744 gen_helper_check_exclusive(cpu_env, tpc, addr, write);
1745 tcg_temp_free(tpc);
1746 tcg_temp_free(write);
1747 }
1748 }
1749 #endif
1750
1751 static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
1752 const uint32_t par[])
1753 {
1754 TCGv_i32 addr = tcg_temp_new_i32();
1755
1756 tcg_gen_mov_i32(addr, arg[1].in);
1757 gen_load_store_alignment(dc, 2, addr, true);
1758 gen_check_exclusive(dc, addr, false);
1759 tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL);
1760 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
1761 tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
1762 tcg_temp_free(addr);
1763 }
1764
1765 static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
1766 const uint32_t par[])
1767 {
1768 TCGv_i32 addr = tcg_temp_new_i32();
1769
1770 tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
1771 if (par[0] & MO_SIZE) {
1772 gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
1773 }
1774 if (par[2]) {
1775 if (par[1]) {
1776 tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
1777 }
1778 tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0]);
1779 } else {
1780 tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0]);
1781 if (par[1]) {
1782 tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
1783 }
1784 }
1785 tcg_temp_free(addr);
1786 }
1787
1788 static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
1789 const uint32_t par[])
1790 {
1791 TCGv_i32 tmp;
1792
1793 if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) {
1794 tmp = tcg_const_i32(arg[1].raw_imm - 1);
1795 tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp);
1796 } else {
1797 tmp = tcg_const_i32(arg[1].imm);
1798 }
1799 tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
1800 tcg_temp_free(tmp);
1801 }
1802
1803 static void translate_loop(DisasContext *dc, const OpcodeArg arg[],
1804 const uint32_t par[])
1805 {
1806 uint32_t lend = arg[1].imm;
1807
1808 tcg_gen_subi_i32(cpu_SR[LCOUNT], arg[0].in, 1);
1809 tcg_gen_movi_i32(cpu_SR[LBEG], dc->base.pc_next);
1810 tcg_gen_movi_i32(cpu_SR[LEND], lend);
1811
1812 if (par[0] != TCG_COND_NEVER) {
1813 TCGLabel *label = gen_new_label();
1814 tcg_gen_brcondi_i32(par[0], arg[0].in, 0, label);
1815 gen_jumpi(dc, lend, 1);
1816 gen_set_label(label);
1817 }
1818
1819 gen_jumpi(dc, dc->base.pc_next, 0);
1820 }
1821
1822 enum {
1823 MAC16_UMUL,
1824 MAC16_MUL,
1825 MAC16_MULA,
1826 MAC16_MULS,
1827 MAC16_NONE,
1828 };
1829
1830 enum {
1831 MAC16_LL,
1832 MAC16_HL,
1833 MAC16_LH,
1834 MAC16_HH,
1835
1836 MAC16_HX = 0x1,
1837 MAC16_XH = 0x2,
1838 };
1839
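/*
 * MAC16 family: optionally load a fresh operand from memory
 * (ld_offset), multiply the selected 16-bit halves and either set or
 * accumulate into ACCHI:ACCLO, keeping ACCHI sign-extended to 8 bits.
 */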
1840 static void translate_mac16(DisasContext *dc, const OpcodeArg arg[],
1841 const uint32_t par[])
1842 {
1843 int op = par[0];
1844 unsigned half = par[1];
1845 uint32_t ld_offset = par[2];
1846 unsigned off = ld_offset ? 2 : 0;
1847 TCGv_i32 vaddr = tcg_temp_new_i32();
1848 TCGv_i32 mem32 = tcg_temp_new_i32();
1849
1850 if (ld_offset) {
1851 tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
1852 gen_load_store_alignment(dc, 2, vaddr, false);
1853 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
1854 }
1855 if (op != MAC16_NONE) {
1856 TCGv_i32 m1 = gen_mac16_m(arg[off].in,
1857 half & MAC16_HX, op == MAC16_UMUL);
1858 TCGv_i32 m2 = gen_mac16_m(arg[off + 1].in,
1859 half & MAC16_XH, op == MAC16_UMUL);
1860
1861 if (op == MAC16_MUL || op == MAC16_UMUL) {
1862 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
1863 if (op == MAC16_UMUL) {
1864 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
1865 } else {
1866 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
1867 }
1868 } else {
1869 TCGv_i32 lo = tcg_temp_new_i32();
1870 TCGv_i32 hi = tcg_temp_new_i32();
1871
1872 tcg_gen_mul_i32(lo, m1, m2);
1873 tcg_gen_sari_i32(hi, lo, 31);
1874 if (op == MAC16_MULA) {
1875 tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
1876 cpu_SR[ACCLO], cpu_SR[ACCHI],
1877 lo, hi);
1878 } else {
1879 tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
1880 cpu_SR[ACCLO], cpu_SR[ACCHI],
1881 lo, hi);
1882 }
1883 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
1884
1885 tcg_temp_free_i32(lo);
1886 tcg_temp_free_i32(hi);
1887 }
1888 tcg_temp_free(m1);
1889 tcg_temp_free(m2);
1890 }
1891 if (ld_offset) {
1892 tcg_gen_mov_i32(arg[1].out, vaddr);
1893 tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32);
1894 }
1895 tcg_temp_free(vaddr);
1896 tcg_temp_free(mem32);
1897 }
1898
1899 static void translate_memw(DisasContext *dc, const OpcodeArg arg[],
1900 const uint32_t par[])
1901 {
1902 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1903 }
1904
1905 static void translate_smin(DisasContext *dc, const OpcodeArg arg[],
1906 const uint32_t par[])
1907 {
1908 tcg_gen_smin_i32(arg[0].out, arg[1].in, arg[2].in);
1909 }
1910
1911 static void translate_umin(DisasContext *dc, const OpcodeArg arg[],
1912 const uint32_t par[])
1913 {
1914 tcg_gen_umin_i32(arg[0].out, arg[1].in, arg[2].in);
1915 }
1916
1917 static void translate_smax(DisasContext *dc, const OpcodeArg arg[],
1918 const uint32_t par[])
1919 {
1920 tcg_gen_smax_i32(arg[0].out, arg[1].in, arg[2].in);
1921 }
1922
1923 static void translate_umax(DisasContext *dc, const OpcodeArg arg[],
1924 const uint32_t par[])
1925 {
1926 tcg_gen_umax_i32(arg[0].out, arg[1].in, arg[2].in);
1927 }
1928
1929 static void translate_mov(DisasContext *dc, const OpcodeArg arg[],
1930 const uint32_t par[])
1931 {
1932 tcg_gen_mov_i32(arg[0].out, arg[1].in);
1933 }
1934
1935 static void translate_movcond(DisasContext *dc, const OpcodeArg arg[],
1936 const uint32_t par[])
1937 {
1938 TCGv_i32 zero = tcg_const_i32(0);
1939
1940 tcg_gen_movcond_i32(par[0], arg[0].out,
1941 arg[2].in, zero, arg[1].in, arg[0].in);
1942 tcg_temp_free(zero);
1943 }
1944
1945 static void translate_movi(DisasContext *dc, const OpcodeArg arg[],
1946 const uint32_t par[])
1947 {
1948 tcg_gen_movi_i32(arg[0].out, arg[1].imm);
1949 }
1950
1951 static void translate_movp(DisasContext *dc, const OpcodeArg arg[],
1952 const uint32_t par[])
1953 {
1954 TCGv_i32 zero = tcg_const_i32(0);
1955 TCGv_i32 tmp = tcg_temp_new_i32();
1956
1957 tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
1958 tcg_gen_movcond_i32(par[0],
1959 arg[0].out, tmp, zero,
1960 arg[1].in, arg[0].in);
1961 tcg_temp_free(tmp);
1962 tcg_temp_free(zero);
1963 }
1964
1965 static void translate_movsp(DisasContext *dc, const OpcodeArg arg[],
1966 const uint32_t par[])
1967 {
1968 tcg_gen_mov_i32(arg[0].out, arg[1].in);
1969 }
1970
1971 static void translate_mul16(DisasContext *dc, const OpcodeArg arg[],
1972 const uint32_t par[])
1973 {
1974 TCGv_i32 v1 = tcg_temp_new_i32();
1975 TCGv_i32 v2 = tcg_temp_new_i32();
1976
1977 if (par[0]) {
1978 tcg_gen_ext16s_i32(v1, arg[1].in);
1979 tcg_gen_ext16s_i32(v2, arg[2].in);
1980 } else {
1981 tcg_gen_ext16u_i32(v1, arg[1].in);
1982 tcg_gen_ext16u_i32(v2, arg[2].in);
1983 }
1984 tcg_gen_mul_i32(arg[0].out, v1, v2);
1985 tcg_temp_free(v2);
1986 tcg_temp_free(v1);
1987 }
1988
1989 static void translate_mull(DisasContext *dc, const OpcodeArg arg[],
1990 const uint32_t par[])
1991 {
1992 tcg_gen_mul_i32(arg[0].out, arg[1].in, arg[2].in);
1993 }
1994
1995 static void translate_mulh(DisasContext *dc, const OpcodeArg arg[],
1996 const uint32_t par[])
1997 {
1998 TCGv_i32 lo = tcg_temp_new();
1999
2000 if (par[0]) {
2001 tcg_gen_muls2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
2002 } else {
2003 tcg_gen_mulu2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
2004 }
2005 tcg_temp_free(lo);
2006 }
2007
2008 static void translate_neg(DisasContext *dc, const OpcodeArg arg[],
2009 const uint32_t par[])
2010 {
2011 tcg_gen_neg_i32(arg[0].out, arg[1].in);
2012 }
2013
2014 static void translate_nop(DisasContext *dc, const OpcodeArg arg[],
2015 const uint32_t par[])
2016 {
2017 }
2018
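/*
 * NSA: normalization shift amount, i.e. the number of redundant sign bits
 * of the operand (31 for both 0 and -1), which is what clrsb computes.
 */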
2019 static void translate_nsa(DisasContext *dc, const OpcodeArg arg[],
2020 const uint32_t par[])
2021 {
2022 tcg_gen_clrsb_i32(arg[0].out, arg[1].in);
2023 }
2024
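/* NSAU: count of leading zero bits, defined as 32 for a zero operand. */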
2025 static void translate_nsau(DisasContext *dc, const OpcodeArg arg[],
2026 const uint32_t par[])
2027 {
2028 tcg_gen_clzi_i32(arg[0].out, arg[1].in, 32);
2029 }
2030
2031 static void translate_or(DisasContext *dc, const OpcodeArg arg[],
2032 const uint32_t par[])
2033 {
2034 tcg_gen_or_i32(arg[0].out, arg[1].in, arg[2].in);
2035 }
2036
2037 static void translate_ptlb(DisasContext *dc, const OpcodeArg arg[],
2038 const uint32_t par[])
2039 {
2040 #ifndef CONFIG_USER_ONLY
2041 TCGv_i32 dtlb = tcg_const_i32(par[0]);
2042
2043 tcg_gen_movi_i32(cpu_pc, dc->pc);
2044 gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb);
2045 tcg_temp_free(dtlb);
2046 #endif
2047 }
2048
2049 static void translate_pptlb(DisasContext *dc, const OpcodeArg arg[],
2050 const uint32_t par[])
2051 {
2052 #ifndef CONFIG_USER_ONLY
2053 tcg_gen_movi_i32(cpu_pc, dc->pc);
2054 gen_helper_pptlb(arg[0].out, cpu_env, arg[1].in);
2055 #endif
2056 }
2057
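/*
 * QUOS/REMS (par[0] selects quotient vs. remainder): the only overflowing
 * case, 0x80000000 / -1, is handled explicitly so that the host division
 * never traps; it yields a quotient of 0x80000000 and a remainder of 0.
 */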
2058 static void translate_quos(DisasContext *dc, const OpcodeArg arg[],
2059 const uint32_t par[])
2060 {
2061 TCGLabel *label1 = gen_new_label();
2062 TCGLabel *label2 = gen_new_label();
2063
2064 tcg_gen_brcondi_i32(TCG_COND_NE, arg[1].in, 0x80000000,
2065 label1);
2066 tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0xffffffff,
2067 label1);
2068 tcg_gen_movi_i32(arg[0].out,
2069 par[0] ? 0x80000000 : 0);
2070 tcg_gen_br(label2);
2071 gen_set_label(label1);
2072 if (par[0]) {
2073 tcg_gen_div_i32(arg[0].out,
2074 arg[1].in, arg[2].in);
2075 } else {
2076 tcg_gen_rem_i32(arg[0].out,
2077 arg[1].in, arg[2].in);
2078 }
2079 gen_set_label(label2);
2080 }
2081
2082 static void translate_quou(DisasContext *dc, const OpcodeArg arg[],
2083 const uint32_t par[])
2084 {
2085 tcg_gen_divu_i32(arg[0].out,
2086 arg[1].in, arg[2].in);
2087 }
2088
2089 static void translate_read_impwire(DisasContext *dc, const OpcodeArg arg[],
2090 const uint32_t par[])
2091 {
2092 /* TODO: GPIO32 may be part of a coprocessor */
2093 tcg_gen_movi_i32(arg[0].out, 0);
2094 }
2095
2096 static void translate_remu(DisasContext *dc, const OpcodeArg arg[],
2097 const uint32_t par[])
2098 {
2099 tcg_gen_remu_i32(arg[0].out,
2100 arg[1].in, arg[2].in);
2101 }
2102
2103 static void translate_rer(DisasContext *dc, const OpcodeArg arg[],
2104 const uint32_t par[])
2105 {
2106 gen_helper_rer(arg[0].out, cpu_env, arg[1].in);
2107 }
2108
2109 static void translate_ret(DisasContext *dc, const OpcodeArg arg[],
2110 const uint32_t par[])
2111 {
2112 gen_jump(dc, cpu_R[0]);
2113 }
2114
2115 static bool test_ill_retw(DisasContext *dc, const OpcodeArg arg[],
2116 const uint32_t par[])
2117 {
2118 if (!dc->cwoe) {
2119 qemu_log_mask(LOG_GUEST_ERROR,
2120 "Illegal retw instruction(pc = %08x)\n", dc->pc);
2121 return true;
2122 } else {
2123 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2124
2125 gen_helper_test_ill_retw(cpu_env, tmp);
2126 tcg_temp_free(tmp);
2127 return false;
2128 }
2129 }
2130
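/*
 * RETW/RETW.N: clear the WINDOW_START bit of the current frame and form the
 * return address from the top two bits of the current PC combined with the
 * low 30 bits of a0; the retw helper is expected to handle the window
 * underflow checks.
 */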
2131 static void translate_retw(DisasContext *dc, const OpcodeArg arg[],
2132 const uint32_t par[])
2133 {
2134 TCGv_i32 tmp = tcg_const_i32(1);
2135 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
2136 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
2137 cpu_SR[WINDOW_START], tmp);
2138 tcg_gen_movi_i32(tmp, dc->pc);
2139 tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30);
2140 gen_helper_retw(cpu_env, cpu_R[0]);
2141 gen_jump(dc, tmp);
2142 tcg_temp_free(tmp);
2143 }
2144
2145 static void translate_rfde(DisasContext *dc, const OpcodeArg arg[],
2146 const uint32_t par[])
2147 {
2148 gen_jump(dc, cpu_SR[dc->config->ndepc ? DEPC : EPC1]);
2149 }
2150
2151 static void translate_rfe(DisasContext *dc, const OpcodeArg arg[],
2152 const uint32_t par[])
2153 {
2154 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
2155 gen_jump(dc, cpu_SR[EPC1]);
2156 }
2157
2158 static void translate_rfi(DisasContext *dc, const OpcodeArg arg[],
2159 const uint32_t par[])
2160 {
2161 tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + arg[0].imm - 2]);
2162 gen_jump(dc, cpu_SR[EPC1 + arg[0].imm - 1]);
2163 }
2164
2165 static void translate_rfw(DisasContext *dc, const OpcodeArg arg[],
2166 const uint32_t par[])
2167 {
2168 TCGv_i32 tmp = tcg_const_i32(1);
2169
2170 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
2171 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
2172
2173 if (par[0]) {
2174 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
2175 cpu_SR[WINDOW_START], tmp);
2176 } else {
2177 tcg_gen_or_i32(cpu_SR[WINDOW_START],
2178 cpu_SR[WINDOW_START], tmp);
2179 }
2180
2181 tcg_temp_free(tmp);
2182 gen_helper_restore_owb(cpu_env);
2183 gen_jump(dc, cpu_SR[EPC1]);
2184 }
2185
2186 static void translate_rotw(DisasContext *dc, const OpcodeArg arg[],
2187 const uint32_t par[])
2188 {
2189 tcg_gen_addi_i32(cpu_windowbase_next, cpu_SR[WINDOW_BASE], arg[0].imm);
2190 }
2191
2192 static void translate_rsil(DisasContext *dc, const OpcodeArg arg[],
2193 const uint32_t par[])
2194 {
2195 tcg_gen_mov_i32(arg[0].out, cpu_SR[PS]);
2196 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
2197 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], arg[1].imm);
2198 }
2199
2200 static void translate_rsr(DisasContext *dc, const OpcodeArg arg[],
2201 const uint32_t par[])
2202 {
2203 tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
2204 }
2205
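/*
 * Reading CCOUNT is timing-sensitive: with icount enabled the access is
 * flagged as I/O, and the helper brings CCOUNT up to date before the value
 * is copied to the destination register.
 */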
2206 static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[],
2207 const uint32_t par[])
2208 {
2209 #ifndef CONFIG_USER_ONLY
2210 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
2211 gen_io_start();
2212 }
2213 gen_helper_update_ccount(cpu_env);
2214 tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
2215 #endif
2216 }
2217
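/*
 * RSR.PTEVADDR: combine the page table base from PTEVADDR with the faulting
 * address (EXCVADDR >> 10) and clear the two low bits to form the address of
 * the PTE for the faulting page.
 */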
2218 static void translate_rsr_ptevaddr(DisasContext *dc, const OpcodeArg arg[],
2219 const uint32_t par[])
2220 {
2221 #ifndef CONFIG_USER_ONLY
2222 TCGv_i32 tmp = tcg_temp_new_i32();
2223
2224 tcg_gen_shri_i32(tmp, cpu_SR[EXCVADDR], 10);
2225 tcg_gen_or_i32(tmp, tmp, cpu_SR[PTEVADDR]);
2226 tcg_gen_andi_i32(arg[0].out, tmp, 0xfffffffc);
2227 tcg_temp_free(tmp);
2228 #endif
2229 }
2230
2231 static void translate_rtlb(DisasContext *dc, const OpcodeArg arg[],
2232 const uint32_t par[])
2233 {
2234 #ifndef CONFIG_USER_ONLY
2235 static void (* const helper[])(TCGv_i32 r, TCGv_env env, TCGv_i32 a1,
2236 TCGv_i32 a2) = {
2237 gen_helper_rtlb0,
2238 gen_helper_rtlb1,
2239 };
2240 TCGv_i32 dtlb = tcg_const_i32(par[0]);
2241
2242 helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb);
2243 tcg_temp_free(dtlb);
2244 #endif
2245 }
2246
2247 static void translate_rptlb0(DisasContext *dc, const OpcodeArg arg[],
2248 const uint32_t par[])
2249 {
2250 #ifndef CONFIG_USER_ONLY
2251 gen_helper_rptlb0(arg[0].out, cpu_env, arg[1].in);
2252 #endif
2253 }
2254
2255 static void translate_rptlb1(DisasContext *dc, const OpcodeArg arg[],
2256 const uint32_t par[])
2257 {
2258 #ifndef CONFIG_USER_ONLY
2259 gen_helper_rptlb1(arg[0].out, cpu_env, arg[1].in);
2260 #endif
2261 }
2262
2263 static void translate_rur(DisasContext *dc, const OpcodeArg arg[],
2264 const uint32_t par[])
2265 {
2266 tcg_gen_mov_i32(arg[0].out, cpu_UR[par[0]]);
2267 }
2268
2269 static void translate_setb_expstate(DisasContext *dc, const OpcodeArg arg[],
2270 const uint32_t par[])
2271 {
2272 /* TODO: GPIO32 may be part of a coprocessor */
2273 tcg_gen_ori_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], 1u << arg[0].imm);
2274 }
2275
2276 #ifdef CONFIG_USER_ONLY
2277 static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
2278 {
2279 }
2280 #else
2281 static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
2282 {
2283 TCGv_i32 tpc = tcg_const_i32(dc->pc);
2284
2285 gen_helper_check_atomctl(cpu_env, tpc, addr);
2286 tcg_temp_free(tpc);
2287 }
2288 #endif
2289
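/*
 * S32C1I: atomic compare-and-swap against SCOMPARE1; the previous memory
 * contents are returned in the source/destination register. In system mode
 * ATOMCTL permissions for the address are checked first.
 */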
2290 static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
2291 const uint32_t par[])
2292 {
2293 TCGv_i32 tmp = tcg_temp_local_new_i32();
2294 TCGv_i32 addr = tcg_temp_local_new_i32();
2295
2296 tcg_gen_mov_i32(tmp, arg[0].in);
2297 tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
2298 gen_load_store_alignment(dc, 2, addr, true);
2299 gen_check_atomctl(dc, addr);
2300 tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
2301 tmp, dc->cring, MO_TEUL);
2302 tcg_temp_free(addr);
2303 tcg_temp_free(tmp);
2304 }
2305
2306 static void translate_s32e(DisasContext *dc, const OpcodeArg arg[],
2307 const uint32_t par[])
2308 {
2309 TCGv_i32 addr = tcg_temp_new_i32();
2310
2311 tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
2312 gen_load_store_alignment(dc, 2, addr, false);
2313 tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL);
2314 tcg_temp_free(addr);
2315 }
2316
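/*
 * S32EX: store-exclusive paired with L32EX. The store only happens when the
 * address still matches cpu_exclusive_addr and the word still holds
 * cpu_exclusive_val; the previous success flag from ATOMCTL bit 8 is returned
 * and the new outcome is deposited back into that bit.
 */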
2317 static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
2318 const uint32_t par[])
2319 {
2320 TCGv_i32 prev = tcg_temp_new_i32();
2321 TCGv_i32 addr = tcg_temp_local_new_i32();
2322 TCGv_i32 res = tcg_temp_local_new_i32();
2323 TCGLabel *label = gen_new_label();
2324
2325 tcg_gen_movi_i32(res, 0);
2326 tcg_gen_mov_i32(addr, arg[1].in);
2327 gen_load_store_alignment(dc, 2, addr, true);
2328 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
2329 gen_check_exclusive(dc, addr, true);
2330 tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
2331 arg[0].in, dc->cring, MO_TEUL);
2332 tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
2333 tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val,
2334 prev, cpu_exclusive_val, prev, cpu_exclusive_val);
2335 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
2336 gen_set_label(label);
2337 tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1);
2338 tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1);
2339 tcg_temp_free(prev);
2340 tcg_temp_free(addr);
2341 tcg_temp_free(res);
2342 }
2343
2344 static void translate_salt(DisasContext *dc, const OpcodeArg arg[],
2345 const uint32_t par[])
2346 {
2347 tcg_gen_setcond_i32(par[0],
2348 arg[0].out,
2349 arg[1].in, arg[2].in);
2350 }
2351
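/*
 * SEXT: sign-extend from the bit position given by the immediate; the 8- and
 * 16-bit cases map to dedicated TCG ops, everything else uses a shift-left/
 * arithmetic-shift-right pair.
 */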
2352 static void translate_sext(DisasContext *dc, const OpcodeArg arg[],
2353 const uint32_t par[])
2354 {
2355 int shift = 31 - arg[2].imm;
2356
2357 if (shift == 24) {
2358 tcg_gen_ext8s_i32(arg[0].out, arg[1].in);
2359 } else if (shift == 16) {
2360 tcg_gen_ext16s_i32(arg[0].out, arg[1].in);
2361 } else {
2362 TCGv_i32 tmp = tcg_temp_new_i32();
2363 tcg_gen_shli_i32(tmp, arg[1].in, shift);
2364 tcg_gen_sari_i32(arg[0].out, tmp, shift);
2365 tcg_temp_free(tmp);
2366 }
2367 }
2368
2369 static bool test_ill_simcall(DisasContext *dc, const OpcodeArg arg[],
2370 const uint32_t par[])
2371 {
2372 #ifdef CONFIG_USER_ONLY
2373 bool ill = true;
2374 #else
2375 bool ill = !semihosting_enabled();
2376 #endif
2377 if (ill) {
2378 qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
2379 }
2380 return ill;
2381 }
2382
2383 static void translate_simcall(DisasContext *dc, const OpcodeArg arg[],
2384 const uint32_t par[])
2385 {
2386 #ifndef CONFIG_USER_ONLY
2387 gen_helper_simcall(cpu_env);
2388 #endif
2389 }
2390
2391 /*
2392 * Note: 64-bit ops are used here solely because SAR values
2393 * can range from 0 to 63
2394 */
2395 #define gen_shift_reg(cmd, reg) do { \
2396 TCGv_i64 tmp = tcg_temp_new_i64(); \
2397 tcg_gen_extu_i32_i64(tmp, reg); \
2398 tcg_gen_##cmd##_i64(v, v, tmp); \
2399 tcg_gen_extrl_i64_i32(arg[0].out, v); \
2400 tcg_temp_free_i64(v); \
2401 tcg_temp_free_i64(tmp); \
2402 } while (0)
2403
2404 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
2405
2406 static void translate_sll(DisasContext *dc, const OpcodeArg arg[],
2407 const uint32_t par[])
2408 {
2409 if (dc->sar_m32_5bit) {
2410 tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32);
2411 } else {
2412 TCGv_i64 v = tcg_temp_new_i64();
2413 TCGv_i32 s = tcg_const_i32(32);
2414 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
2415 tcg_gen_andi_i32(s, s, 0x3f);
2416 tcg_gen_extu_i32_i64(v, arg[1].in);
2417 gen_shift_reg(shl, s);
2418 tcg_temp_free(s);
2419 }
2420 }
2421
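/*
 * SLLI: the encoding can express a shift amount of 32, which the ISA leaves
 * undefined; log a guest error for it and mask the amount to 5 bits.
 */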
2422 static void translate_slli(DisasContext *dc, const OpcodeArg arg[],
2423 const uint32_t par[])
2424 {
2425 if (arg[2].imm == 32) {
2426 qemu_log_mask(LOG_GUEST_ERROR, "slli a%d, a%d, 32 is undefined\n",
2427 arg[0].imm, arg[1].imm);
2428 }
2429 tcg_gen_shli_i32(arg[0].out, arg[1].in, arg[2].imm & 0x1f);
2430 }
2431
2432 static void translate_sra(DisasContext *dc, const OpcodeArg arg[],
2433 const uint32_t par[])
2434 {
2435 if (dc->sar_m32_5bit) {
2436 tcg_gen_sar_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
2437 } else {
2438 TCGv_i64 v = tcg_temp_new_i64();
2439 tcg_gen_ext_i32_i64(v, arg[1].in);
2440 gen_shift(sar);
2441 }
2442 }
2443
2444 static void translate_srai(DisasContext *dc, const OpcodeArg arg[],
2445 const uint32_t par[])
2446 {
2447 tcg_gen_sari_i32(arg[0].out, arg[1].in, arg[2].imm);
2448 }
2449
2450 static void translate_src(DisasContext *dc, const OpcodeArg arg[],
2451 const uint32_t par[])
2452 {
2453 TCGv_i64 v = tcg_temp_new_i64();
2454 tcg_gen_concat_i32_i64(v, arg[2].in, arg[1].in);
2455 gen_shift(shr);
2456 }
2457
2458 static void translate_srl(DisasContext *dc, const OpcodeArg arg[],
2459 const uint32_t par[])
2460 {
2461 if (dc->sar_m32_5bit) {
2462 tcg_gen_shr_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
2463 } else {
2464 TCGv_i64 v = tcg_temp_new_i64();
2465 tcg_gen_extu_i32_i64(v, arg[1].in);
2466 gen_shift(shr);
2467 }
2468 }
2469
2470 #undef gen_shift
2471 #undef gen_shift_reg
2472
2473 static void translate_srli(DisasContext *dc, const OpcodeArg arg[],
2474 const uint32_t par[])
2475 {
2476 tcg_gen_shri_i32(arg[0].out, arg[1].in, arg[2].imm);
2477 }
2478
2479 static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[],
2480 const uint32_t par[])
2481 {
2482 TCGv_i32 tmp = tcg_temp_new_i32();
2483 tcg_gen_shli_i32(tmp, arg[0].in, 3);
2484 gen_left_shift_sar(dc, tmp);
2485 tcg_temp_free(tmp);
2486 }
2487
2488 static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[],
2489 const uint32_t par[])
2490 {
2491 TCGv_i32 tmp = tcg_temp_new_i32();
2492 tcg_gen_shli_i32(tmp, arg[0].in, 3);
2493 gen_right_shift_sar(dc, tmp);
2494 tcg_temp_free(tmp);
2495 }
2496
2497 static void translate_ssai(DisasContext *dc, const OpcodeArg arg[],
2498 const uint32_t par[])
2499 {
2500 TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
2501 gen_right_shift_sar(dc, tmp);
2502 tcg_temp_free(tmp);
2503 }
2504
2505 static void translate_ssl(DisasContext *dc, const OpcodeArg arg[],
2506 const uint32_t par[])
2507 {
2508 gen_left_shift_sar(dc, arg[0].in);
2509 }
2510
2511 static void translate_ssr(DisasContext *dc, const OpcodeArg arg[],
2512 const uint32_t par[])
2513 {
2514 gen_right_shift_sar(dc, arg[0].in);
2515 }
2516
2517 static void translate_sub(DisasContext *dc, const OpcodeArg arg[],
2518 const uint32_t par[])
2519 {
2520 tcg_gen_sub_i32(arg[0].out, arg[1].in, arg[2].in);
2521 }
2522
2523 static void translate_subx(DisasContext *dc, const OpcodeArg arg[],
2524 const uint32_t par[])
2525 {
2526 TCGv_i32 tmp = tcg_temp_new_i32();
2527 tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
2528 tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in);
2529 tcg_temp_free(tmp);
2530 }
2531
2532 static void translate_waiti(DisasContext *dc, const OpcodeArg arg[],
2533 const uint32_t par[])
2534 {
2535 #ifndef CONFIG_USER_ONLY
2536 gen_waiti(dc, arg[0].imm);
2537 #endif
2538 }
2539
2540 static void translate_wtlb(DisasContext *dc, const OpcodeArg arg[],
2541 const uint32_t par[])
2542 {
2543 #ifndef CONFIG_USER_ONLY
2544 TCGv_i32 dtlb = tcg_const_i32(par[0]);
2545
2546 gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb);
2547 tcg_temp_free(dtlb);
2548 #endif
2549 }
2550
2551 static void translate_wptlb(DisasContext *dc, const OpcodeArg arg[],
2552 const uint32_t par[])
2553 {
2554 #ifndef CONFIG_USER_ONLY
2555 gen_helper_wptlb(cpu_env, arg[0].in, arg[1].in);
2556 #endif
2557 }
2558
2559 static void translate_wer(DisasContext *dc, const OpcodeArg arg[],
2560 const uint32_t par[])
2561 {
2562 gen_helper_wer(cpu_env, arg[0].in, arg[1].in);
2563 }
2564
2565 static void translate_wrmsk_expstate(DisasContext *dc, const OpcodeArg arg[],
2566 const uint32_t par[])
2567 {
2568 /* TODO: GPIO32 may be part of a coprocessor */
2569 tcg_gen_and_i32(cpu_UR[EXPSTATE], arg[0].in, arg[1].in);
2570 }
2571
2572 static void translate_wsr(DisasContext *dc, const OpcodeArg arg[],
2573 const uint32_t par[])
2574 {
2575 tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
2576 }
2577
2578 static void translate_wsr_mask(DisasContext *dc, const OpcodeArg arg[],
2579 const uint32_t par[])
2580 {
2581 tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, par[2]);
2582 }
2583
2584 static void translate_wsr_acchi(DisasContext *dc, const OpcodeArg arg[],
2585 const uint32_t par[])
2586 {
2587 tcg_gen_ext8s_i32(cpu_SR[par[0]], arg[0].in);
2588 }
2589
2590 static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[],
2591 const uint32_t par[])
2592 {
2593 #ifndef CONFIG_USER_ONLY
2594 uint32_t id = par[0] - CCOMPARE;
2595 TCGv_i32 tmp = tcg_const_i32(id);
2596
2597 assert(id < dc->config->nccompare);
2598 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
2599 gen_io_start();
2600 }
2601 tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
2602 gen_helper_update_ccompare(cpu_env, tmp);
2603 tcg_temp_free(tmp);
2604 #endif
2605 }
2606
2607 static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[],
2608 const uint32_t par[])
2609 {
2610 #ifndef CONFIG_USER_ONLY
2611 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
2612 gen_io_start();
2613 }
2614 gen_helper_wsr_ccount(cpu_env, arg[0].in);
2615 #endif
2616 }
2617
2618 static void translate_wsr_dbreaka(DisasContext *dc, const OpcodeArg arg[],
2619 const uint32_t par[])
2620 {
2621 #ifndef CONFIG_USER_ONLY
2622 unsigned id = par[0] - DBREAKA;
2623 TCGv_i32 tmp = tcg_const_i32(id);
2624
2625 assert(id < dc->config->ndbreak);
2626 gen_helper_wsr_dbreaka(cpu_env, tmp, arg[0].in);
2627 tcg_temp_free(tmp);
2628 #endif
2629 }
2630
2631 static void translate_wsr_dbreakc(DisasContext *dc, const OpcodeArg arg[],
2632 const uint32_t par[])
2633 {
2634 #ifndef CONFIG_USER_ONLY
2635 unsigned id = par[0] - DBREAKC;
2636 TCGv_i32 tmp = tcg_const_i32(id);
2637
2638 assert(id < dc->config->ndbreak);
2639 gen_helper_wsr_dbreakc(cpu_env, tmp, arg[0].in);
2640 tcg_temp_free(tmp);
2641 #endif
2642 }
2643
2644 static void translate_wsr_ibreaka(DisasContext *dc, const OpcodeArg arg[],
2645 const uint32_t par[])
2646 {
2647 #ifndef CONFIG_USER_ONLY
2648 unsigned id = par[0] - IBREAKA;
2649 TCGv_i32 tmp = tcg_const_i32(id);
2650
2651 assert(id < dc->config->nibreak);
2652 gen_helper_wsr_ibreaka(cpu_env, tmp, arg[0].in);
2653 tcg_temp_free(tmp);
2654 #endif
2655 }
2656
2657 static void translate_wsr_ibreakenable(DisasContext *dc, const OpcodeArg arg[],
2658 const uint32_t par[])
2659 {
2660 #ifndef CONFIG_USER_ONLY
2661 gen_helper_wsr_ibreakenable(cpu_env, arg[0].in);
2662 #endif
2663 }
2664
2665 static void translate_wsr_icount(DisasContext *dc, const OpcodeArg arg[],
2666 const uint32_t par[])
2667 {
2668 #ifndef CONFIG_USER_ONLY
2669 if (dc->icount) {
2670 tcg_gen_mov_i32(dc->next_icount, arg[0].in);
2671 } else {
2672 tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
2673 }
2674 #endif
2675 }
2676
2677 static void translate_wsr_intclear(DisasContext *dc, const OpcodeArg arg[],
2678 const uint32_t par[])
2679 {
2680 #ifndef CONFIG_USER_ONLY
2681 gen_helper_intclear(cpu_env, arg[0].in);
2682 #endif
2683 }
2684
2685 static void translate_wsr_intset(DisasContext *dc, const OpcodeArg arg[],
2686 const uint32_t par[])
2687 {
2688 #ifndef CONFIG_USER_ONLY
2689 gen_helper_intset(cpu_env, arg[0].in);
2690 #endif
2691 }
2692
2693 static void translate_wsr_memctl(DisasContext *dc, const OpcodeArg arg[],
2694 const uint32_t par[])
2695 {
2696 #ifndef CONFIG_USER_ONLY
2697 gen_helper_wsr_memctl(cpu_env, arg[0].in);
2698 #endif
2699 }
2700
2701 static void translate_wsr_mpuenb(DisasContext *dc, const OpcodeArg arg[],
2702 const uint32_t par[])
2703 {
2704 #ifndef CONFIG_USER_ONLY
2705 gen_helper_wsr_mpuenb(cpu_env, arg[0].in);
2706 #endif
2707 }
2708
2709 static void translate_wsr_ps(DisasContext *dc, const OpcodeArg arg[],
2710 const uint32_t par[])
2711 {
2712 #ifndef CONFIG_USER_ONLY
2713 uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
2714 PS_UM | PS_EXCM | PS_INTLEVEL;
2715
2716 if (option_enabled(dc, XTENSA_OPTION_MMU) ||
2717 option_enabled(dc, XTENSA_OPTION_MPU)) {
2718 mask |= PS_RING;
2719 }
2720 tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, mask);
2721 #endif
2722 }
2723
2724 static void translate_wsr_rasid(DisasContext *dc, const OpcodeArg arg[],
2725 const uint32_t par[])
2726 {
2727 #ifndef CONFIG_USER_ONLY
2728 gen_helper_wsr_rasid(cpu_env, arg[0].in);
2729 #endif
2730 }
2731
2732 static void translate_wsr_sar(DisasContext *dc, const OpcodeArg arg[],
2733 const uint32_t par[])
2734 {
2735 tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, 0x3f);
2736 if (dc->sar_m32_5bit) {
2737 tcg_gen_discard_i32(dc->sar_m32);
2738 }
2739 dc->sar_5bit = false;
2740 dc->sar_m32_5bit = false;
2741 }
2742
2743 static void translate_wsr_windowbase(DisasContext *dc, const OpcodeArg arg[],
2744 const uint32_t par[])
2745 {
2746 #ifndef CONFIG_USER_ONLY
2747 tcg_gen_mov_i32(cpu_windowbase_next, arg[0].in);
2748 #endif
2749 }
2750
2751 static void translate_wsr_windowstart(DisasContext *dc, const OpcodeArg arg[],
2752 const uint32_t par[])
2753 {
2754 #ifndef CONFIG_USER_ONLY
2755 tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in,
2756 (1 << dc->config->nareg / 4) - 1);
2757 #endif
2758 }
2759
2760 static void translate_wur(DisasContext *dc, const OpcodeArg arg[],
2761 const uint32_t par[])
2762 {
2763 tcg_gen_mov_i32(cpu_UR[par[0]], arg[0].in);
2764 }
2765
2766 static void translate_wur_fcr(DisasContext *dc, const OpcodeArg arg[],
2767 const uint32_t par[])
2768 {
2769 gen_helper_wur_fcr(cpu_env, arg[0].in);
2770 }
2771
2772 static void translate_wur_fsr(DisasContext *dc, const OpcodeArg arg[],
2773 const uint32_t par[])
2774 {
2775 tcg_gen_andi_i32(cpu_UR[par[0]], arg[0].in, 0xffffff80);
2776 }
2777
2778 static void translate_xor(DisasContext *dc, const OpcodeArg arg[],
2779 const uint32_t par[])
2780 {
2781 tcg_gen_xor_i32(arg[0].out, arg[1].in, arg[2].in);
2782 }
2783
2784 static void translate_xsr(DisasContext *dc, const OpcodeArg arg[],
2785 const uint32_t par[])
2786 {
2787 TCGv_i32 tmp = tcg_temp_new_i32();
2788
2789 tcg_gen_mov_i32(tmp, arg[0].in);
2790 tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
2791 tcg_gen_mov_i32(cpu_SR[par[0]], tmp);
2792 tcg_temp_free(tmp);
2793 }
2794
2795 static void translate_xsr_mask(DisasContext *dc, const OpcodeArg arg[],
2796 const uint32_t par[])
2797 {
2798 TCGv_i32 tmp = tcg_temp_new_i32();
2799
2800 tcg_gen_mov_i32(tmp, arg[0].in);
2801 tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
2802 tcg_gen_andi_i32(cpu_SR[par[0]], tmp, par[2]);
2803 tcg_temp_free(tmp);
2804 }
2805
2806 static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[],
2807 const uint32_t par[])
2808 {
2809 #ifndef CONFIG_USER_ONLY
2810 TCGv_i32 tmp = tcg_temp_new_i32();
2811
2812 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
2813 gen_io_start();
2814 }
2815
2816 gen_helper_update_ccount(cpu_env);
2817 tcg_gen_mov_i32(tmp, cpu_SR[par[0]]);
2818 gen_helper_wsr_ccount(cpu_env, arg[0].in);
2819 tcg_gen_mov_i32(arg[0].out, tmp);
2820 tcg_temp_free(tmp);
2821
2822 #endif
2823 }
2824
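/*
 * Build XSR handlers for SRs that have dedicated WSR translators: save the
 * old SR value, reuse the matching translate_wsr_* to perform the write, and
 * return the saved value in the output register.
 */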
2825 #define gen_translate_xsr(name) \
2826 static void translate_xsr_##name(DisasContext *dc, const OpcodeArg arg[], \
2827 const uint32_t par[]) \
2828 { \
2829 TCGv_i32 tmp = tcg_temp_new_i32(); \
2830 \
2831 tcg_gen_mov_i32(tmp, cpu_SR[par[0]]); \
2832 translate_wsr_##name(dc, arg, par); \
2833 tcg_gen_mov_i32(arg[0].out, tmp); \
2834 tcg_temp_free(tmp); \
2835 }
2836
2837 gen_translate_xsr(acchi)
2838 gen_translate_xsr(ccompare)
2839 gen_translate_xsr(dbreaka)
2840 gen_translate_xsr(dbreakc)
2841 gen_translate_xsr(ibreaka)
2842 gen_translate_xsr(ibreakenable)
2843 gen_translate_xsr(icount)
2844 gen_translate_xsr(memctl)
2845 gen_translate_xsr(mpuenb)
2846 gen_translate_xsr(ps)
2847 gen_translate_xsr(rasid)
2848 gen_translate_xsr(sar)
2849 gen_translate_xsr(windowbase)
2850 gen_translate_xsr(windowstart)
2851
2852 #undef gen_translate_xsr
2853
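/*
 * Core ISA opcode table. Entries flagged XTENSA_OP_NAME_ARRAY use a
 * NULL-terminated list of opcode names that share one translator; .par
 * passes per-opcode constants such as a TCG condition or a shift amount.
 */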
2854 static const XtensaOpcodeOps core_ops[] = {
2855 {
2856 .name = "abs",
2857 .translate = translate_abs,
2858 }, {
2859 .name = (const char * const[]) {
2860 "add", "add.n", NULL,
2861 },
2862 .translate = translate_add,
2863 .op_flags = XTENSA_OP_NAME_ARRAY,
2864 }, {
2865 .name = (const char * const[]) {
2866 "addi", "addi.n", NULL,
2867 },
2868 .translate = translate_addi,
2869 .op_flags = XTENSA_OP_NAME_ARRAY,
2870 }, {
2871 .name = "addmi",
2872 .translate = translate_addi,
2873 }, {
2874 .name = "addx2",
2875 .translate = translate_addx,
2876 .par = (const uint32_t[]){1},
2877 }, {
2878 .name = "addx4",
2879 .translate = translate_addx,
2880 .par = (const uint32_t[]){2},
2881 }, {
2882 .name = "addx8",
2883 .translate = translate_addx,
2884 .par = (const uint32_t[]){3},
2885 }, {
2886 .name = "all4",
2887 .translate = translate_all,
2888 .par = (const uint32_t[]){true, 4},
2889 }, {
2890 .name = "all8",
2891 .translate = translate_all,
2892 .par = (const uint32_t[]){true, 8},
2893 }, {
2894 .name = "and",
2895 .translate = translate_and,
2896 }, {
2897 .name = "andb",
2898 .translate = translate_boolean,
2899 .par = (const uint32_t[]){BOOLEAN_AND},
2900 }, {
2901 .name = "andbc",
2902 .translate = translate_boolean,
2903 .par = (const uint32_t[]){BOOLEAN_ANDC},
2904 }, {
2905 .name = "any4",
2906 .translate = translate_all,
2907 .par = (const uint32_t[]){false, 4},
2908 }, {
2909 .name = "any8",
2910 .translate = translate_all,
2911 .par = (const uint32_t[]){false, 8},
2912 }, {
2913 .name = (const char * const[]) {
2914 "ball", "ball.w15", "ball.w18", NULL,
2915 },
2916 .translate = translate_ball,
2917 .par = (const uint32_t[]){TCG_COND_EQ},
2918 .op_flags = XTENSA_OP_NAME_ARRAY,
2919 }, {
2920 .name = (const char * const[]) {
2921 "bany", "bany.w15", "bany.w18", NULL,
2922 },
2923 .translate = translate_bany,
2924 .par = (const uint32_t[]){TCG_COND_NE},
2925 .op_flags = XTENSA_OP_NAME_ARRAY,
2926 }, {
2927 .name = (const char * const[]) {
2928 "bbc", "bbc.w15", "bbc.w18", NULL,
2929 },
2930 .translate = translate_bb,
2931 .par = (const uint32_t[]){TCG_COND_EQ},
2932 .op_flags = XTENSA_OP_NAME_ARRAY,
2933 }, {
2934 .name = (const char * const[]) {
2935 "bbci", "bbci.w15", "bbci.w18", NULL,
2936 },
2937 .translate = translate_bbi,
2938 .par = (const uint32_t[]){TCG_COND_EQ},
2939 .op_flags = XTENSA_OP_NAME_ARRAY,
2940 }, {
2941 .name = (const char * const[]) {
2942 "bbs", "bbs.w15", "bbs.w18", NULL,
2943 },
2944 .translate = translate_bb,
2945 .par = (const uint32_t[]){TCG_COND_NE},
2946 .op_flags = XTENSA_OP_NAME_ARRAY,
2947 }, {
2948 .name = (const char * const[]) {
2949 "bbsi", "bbsi.w15", "bbsi.w18", NULL,
2950 },
2951 .translate = translate_bbi,
2952 .par = (const uint32_t[]){TCG_COND_NE},
2953 .op_flags = XTENSA_OP_NAME_ARRAY,
2954 }, {
2955 .name = (const char * const[]) {
2956 "beq", "beq.w15", "beq.w18", NULL,
2957 },
2958 .translate = translate_b,
2959 .par = (const uint32_t[]){TCG_COND_EQ},
2960 .op_flags = XTENSA_OP_NAME_ARRAY,
2961 }, {
2962 .name = (const char * const[]) {
2963 "beqi", "beqi.w15", "beqi.w18", NULL,
2964 },
2965 .translate = translate_bi,
2966 .par = (const uint32_t[]){TCG_COND_EQ},
2967 .op_flags = XTENSA_OP_NAME_ARRAY,
2968 }, {
2969 .name = (const char * const[]) {
2970 "beqz", "beqz.n", "beqz.w15", "beqz.w18", NULL,
2971 },
2972 .translate = translate_bz,
2973 .par = (const uint32_t[]){TCG_COND_EQ},
2974 .op_flags = XTENSA_OP_NAME_ARRAY,
2975 }, {
2976 .name = "bf",
2977 .translate = translate_bp,
2978 .par = (const uint32_t[]){TCG_COND_EQ},
2979 }, {
2980 .name = (const char * const[]) {
2981 "bge", "bge.w15", "bge.w18", NULL,
2982 },
2983 .translate = translate_b,
2984 .par = (const uint32_t[]){TCG_COND_GE},
2985 .op_flags = XTENSA_OP_NAME_ARRAY,
2986 }, {
2987 .name = (const char * const[]) {
2988 "bgei", "bgei.w15", "bgei.w18", NULL,
2989 },
2990 .translate = translate_bi,
2991 .par = (const uint32_t[]){TCG_COND_GE},
2992 .op_flags = XTENSA_OP_NAME_ARRAY,
2993 }, {
2994 .name = (const char * const[]) {
2995 "bgeu", "bgeu.w15", "bgeu.w18", NULL,
2996 },
2997 .translate = translate_b,
2998 .par = (const uint32_t[]){TCG_COND_GEU},
2999 .op_flags = XTENSA_OP_NAME_ARRAY,
3000 }, {
3001 .name = (const char * const[]) {
3002 "bgeui", "bgeui.w15", "bgeui.w18", NULL,
3003 },
3004 .translate = translate_bi,
3005 .par = (const uint32_t[]){TCG_COND_GEU},
3006 .op_flags = XTENSA_OP_NAME_ARRAY,
3007 }, {
3008 .name = (const char * const[]) {
3009 "bgez", "bgez.w15", "bgez.w18", NULL,
3010 },
3011 .translate = translate_bz,
3012 .par = (const uint32_t[]){TCG_COND_GE},
3013 .op_flags = XTENSA_OP_NAME_ARRAY,
3014 }, {
3015 .name = (const char * const[]) {
3016 "blt", "blt.w15", "blt.w18", NULL,
3017 },
3018 .translate = translate_b,
3019 .par = (const uint32_t[]){TCG_COND_LT},
3020 .op_flags = XTENSA_OP_NAME_ARRAY,
3021 }, {
3022 .name = (const char * const[]) {
3023 "blti", "blti.w15", "blti.w18", NULL,
3024 },
3025 .translate = translate_bi,
3026 .par = (const uint32_t[]){TCG_COND_LT},
3027 .op_flags = XTENSA_OP_NAME_ARRAY,
3028 }, {
3029 .name = (const char * const[]) {
3030 "bltu", "bltu.w15", "bltu.w18", NULL,
3031 },
3032 .translate = translate_b,
3033 .par = (const uint32_t[]){TCG_COND_LTU},
3034 .op_flags = XTENSA_OP_NAME_ARRAY,
3035 }, {
3036 .name = (const char * const[]) {
3037 "bltui", "bltui.w15", "bltui.w18", NULL,
3038 },
3039 .translate = translate_bi,
3040 .par = (const uint32_t[]){TCG_COND_LTU},
3041 .op_flags = XTENSA_OP_NAME_ARRAY,
3042 }, {
3043 .name = (const char * const[]) {
3044 "bltz", "bltz.w15", "bltz.w18", NULL,
3045 },
3046 .translate = translate_bz,
3047 .par = (const uint32_t[]){TCG_COND_LT},
3048 .op_flags = XTENSA_OP_NAME_ARRAY,
3049 }, {
3050 .name = (const char * const[]) {
3051 "bnall", "bnall.w15", "bnall.w18", NULL,
3052 },
3053 .translate = translate_ball,
3054 .par = (const uint32_t[]){TCG_COND_NE},
3055 .op_flags = XTENSA_OP_NAME_ARRAY,
3056 }, {
3057 .name = (const char * const[]) {
3058 "bne", "bne.w15", "bne.w18", NULL,
3059 },
3060 .translate = translate_b,
3061 .par = (const uint32_t[]){TCG_COND_NE},
3062 .op_flags = XTENSA_OP_NAME_ARRAY,
3063 }, {
3064 .name = (const char * const[]) {
3065 "bnei", "bnei.w15", "bnei.w18", NULL,
3066 },
3067 .translate = translate_bi,
3068 .par = (const uint32_t[]){TCG_COND_NE},
3069 .op_flags = XTENSA_OP_NAME_ARRAY,
3070 }, {
3071 .name = (const char * const[]) {
3072 "bnez", "bnez.n", "bnez.w15", "bnez.w18", NULL,
3073 },
3074 .translate = translate_bz,
3075 .par = (const uint32_t[]){TCG_COND_NE},
3076 .op_flags = XTENSA_OP_NAME_ARRAY,
3077 }, {
3078 .name = (const char * const[]) {
3079 "bnone", "bnone.w15", "bnone.w18", NULL,
3080 },
3081 .translate = translate_bany,
3082 .par = (const uint32_t[]){TCG_COND_EQ},
3083 .op_flags = XTENSA_OP_NAME_ARRAY,
3084 }, {
3085 .name = "break",
3086 .translate = translate_nop,
3087 .par = (const uint32_t[]){DEBUGCAUSE_BI},
3088 .op_flags = XTENSA_OP_DEBUG_BREAK,
3089 }, {
3090 .name = "break.n",
3091 .translate = translate_nop,
3092 .par = (const uint32_t[]){DEBUGCAUSE_BN},
3093 .op_flags = XTENSA_OP_DEBUG_BREAK,
3094 }, {
3095 .name = "bt",
3096 .translate = translate_bp,
3097 .par = (const uint32_t[]){TCG_COND_NE},
3098 }, {
3099 .name = "call0",
3100 .translate = translate_call0,
3101 }, {
3102 .name = "call12",
3103 .translate = translate_callw,
3104 .par = (const uint32_t[]){3},
3105 }, {
3106 .name = "call4",
3107 .translate = translate_callw,
3108 .par = (const uint32_t[]){1},
3109 }, {
3110 .name = "call8",
3111 .translate = translate_callw,
3112 .par = (const uint32_t[]){2},
3113 }, {
3114 .name = "callx0",
3115 .translate = tran