tmp105: Correct handling of temperature limit checks
[qemu.git] / target / xtensa / translate.c
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "qemu/osdep.h"
32
33 #include "cpu.h"
34 #include "exec/exec-all.h"
35 #include "disas/disas.h"
36 #include "tcg/tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/qemu-print.h"
39 #include "exec/cpu_ldst.h"
40 #include "hw/semihosting/semihost.h"
41 #include "exec/translator.h"
42
43 #include "exec/helper-proto.h"
44 #include "exec/helper-gen.h"
45
46 #include "trace-tcg.h"
47 #include "exec/log.h"
48
49
/*
 * Per-translation state shared by all opcode translators while a single
 * translation block is being generated.
 */
struct DisasContext {
    DisasContextBase base;
    const XtensaConfig *config;  /* static configuration of this core */
    uint32_t pc;                 /* virtual address of the insn being translated */
    int cring;                   /* effective ring for data access (0 when PS.EXCM) */
    int ring;                    /* current privilege ring (PS.RING) */
    uint32_t lbeg_off;           /* LBEG as offset back from loop end; 0 = unknown,
                                    use cpu_SR[LBEG] instead (see gen_check_loop_end) */
    uint32_t lend;               /* LEND address for the zero-overhead loop */

    /* Static knowledge about SAR, used to emit cheaper shift sequences. */
    bool sar_5bit;               /* SAR known to hold a 5-bit right-shift amount */
    bool sar_m32_5bit;           /* sar_m32 holds the masked left-shift amount */
    bool sar_m32_allocated;      /* sar_m32 temp has been allocated */
    TCGv_i32 sar_m32;

    unsigned window;             /* register quads known valid (see gen_window_check) */
    unsigned callinc;            /* pending call window increment — set outside this chunk */
    bool cwoe;                   /* presumably PS.WOE — not referenced in this chunk, confirm */

    bool debug;                  /* debug option enabled in tb flags */
    bool icount;                 /* ICOUNT tracking enabled in tb flags */
    TCGv_i32 next_icount;

    unsigned cpenable;           /* CPENABLE bits snapshot from tb flags */

    uint32_t op_flags;           /* union of XTENSA_OP_* flags of the current insn */
    xtensa_insnbuf_word insnbuf[MAX_INSNBUF_LENGTH];
    xtensa_insnbuf_word slotbuf[MAX_INSNBUF_LENGTH];
};
78
/* TCG globals mirroring CPUXtensaState fields accessed by generated code. */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];       /* current AR register window */
static TCGv_i32 cpu_FR[16];      /* FP registers, low 32 bits */
static TCGv_i64 cpu_FRD[16];     /* FP registers viewed as 64-bit */
static TCGv_i32 cpu_MR[4];       /* MAC16 accumulator registers */
static TCGv_i32 cpu_BR[16];      /* boolean registers (all alias sregs[BR]) */
static TCGv_i32 cpu_BR4[4];      /* 4-bit groups of BR */
static TCGv_i32 cpu_BR8[2];      /* 8-bit groups of BR */
static TCGv_i32 cpu_SR[256];     /* special registers (configured ones only) */
static TCGv_i32 cpu_UR[256];     /* user registers (configured ones only) */
static TCGv_i32 cpu_windowbase_next;
static TCGv_i32 cpu_exclusive_addr;  /* exclusive load/store tracking */
static TCGv_i32 cpu_exclusive_val;

/* Maps "NAME ENTRIESxBITS" geometry strings to the TCG arrays above. */
static GHashTable *xtensa_regfile_table;
94
95 #include "exec/gen-icount.h"
96
/* Names for configured special/user registers, built lazily by
 * xtensa_collect_sr_names(); NULL entries mean "not configured". */
static char *sr_name[256];
static char *ur_name[256];
99
/*
 * Record the names of all special/user registers of @config into
 * sr_name[]/ur_name[].  When several cores give different names to the
 * same register number, the names are joined with '/'.
 *
 * Uses glib allocators (which abort on OOM) instead of bare malloc(),
 * whose unchecked result could previously be dereferenced on failure.
 */
void xtensa_collect_sr_names(const XtensaConfig *config)
{
    xtensa_isa isa = config->isa;
    int n = xtensa_isa_num_sysregs(isa);
    int i;

    for (i = 0; i < n; ++i) {
        int sr = xtensa_sysreg_number(isa, i);

        if (sr >= 0 && sr < 256) {
            const char *name = xtensa_sysreg_name(isa, i);
            char **pname =
                (xtensa_sysreg_is_user(isa, i) ? ur_name : sr_name) + sr;

            if (*pname) {
                /* Only append the name if it is not already present. */
                if (strstr(*pname, name) == NULL) {
                    char *new_name = g_strdup_printf("%s/%s", *pname, name);

                    g_free(*pname);
                    *pname = new_name;
                }
            } else {
                *pname = g_strdup(name);
            }
        }
    }
}
131
/*
 * Create the TCG globals declared above.  Called once, after
 * xtensa_collect_sr_names() has filled sr_name[]/ur_name[]: only special
 * and user registers with a collected name get a TCG global.
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    static const char * const fregnames[] = {
        "f0", "f1", "f2", "f3",
        "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11",
        "f12", "f13", "f14", "f15",
    };
    static const char * const mregnames[] = {
        "m0", "m1", "m2", "m3",
    };
    static const char * const bregnames[] = {
        "b0", "b1", "b2", "b3",
        "b4", "b5", "b6", "b7",
        "b8", "b9", "b10", "b11",
        "b12", "b13", "b14", "b15",
    };
    int i;

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
            offsetof(CPUXtensaState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUXtensaState, regs[i]),
                                          regnames[i]);
    }

    /* 32-bit view of the FP registers (low half of each 64-bit slot). */
    for (i = 0; i < 16; i++) {
        cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    fregs[i].f32[FP_F32_LOW]),
                                           fregnames[i]);
    }

    /* 64-bit view of the same FP registers. */
    for (i = 0; i < 16; i++) {
        cpu_FRD[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUXtensaState,
                                                     fregs[i].f64),
                                            fregnames[i]);
    }

    for (i = 0; i < 4; i++) {
        cpu_MR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    sregs[MR + i]),
                                           mregnames[i]);
    }

    /*
     * NOTE(review): all BR/BR4/BR8 entries alias the single sregs[BR]
     * word, and the BR4/BR8 views reuse bregnames[i] of the group's first
     * bit — so several globals share a name and offset.  Looks intentional
     * (packed boolean register views); confirm the duplication is benign.
     */
    for (i = 0; i < 16; i++) {
        cpu_BR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    sregs[BR]),
                                           bregnames[i]);
        if (i % 4 == 0) {
            cpu_BR4[i / 4] = tcg_global_mem_new_i32(cpu_env,
                                                    offsetof(CPUXtensaState,
                                                             sregs[BR]),
                                                    bregnames[i]);
        }
        if (i % 8 == 0) {
            cpu_BR8[i / 8] = tcg_global_mem_new_i32(cpu_env,
                                                    offsetof(CPUXtensaState,
                                                             sregs[BR]),
                                                    bregnames[i]);
        }
    }

    /* Only registers that some configuration actually names are created. */
    for (i = 0; i < 256; ++i) {
        if (sr_name[i]) {
            cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUXtensaState,
                                                        sregs[i]),
                                               sr_name[i]);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (ur_name[i]) {
            cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUXtensaState,
                                                        uregs[i]),
                                               ur_name[i]);
        }
    }

    cpu_windowbase_next =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, windowbase_next),
                               "windowbase_next");
    cpu_exclusive_addr =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, exclusive_addr),
                               "exclusive_addr");
    cpu_exclusive_val =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, exclusive_val),
                               "exclusive_val");
}
237
/*
 * Look up the TCG register array backing the register file with the given
 * name and geometry ("NAME ENTRIESxBITS").  Returns NULL if unknown.
 */
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits)
{
    /*
     * AR is special.  Xtensa translator uses it as a current register
     * window, but configuration overlays represent it as a complete
     * physical register file, so every AR geometry maps to cpu_R.
     */
    static const struct {
        const char *geometry;
        void *regfile;
    } mapping[] = {
        { "AR 16x32", cpu_R },
        { "AR 32x32", cpu_R },
        { "AR 64x32", cpu_R },
        { "MR 4x32", cpu_MR },
        { "FR 16x32", cpu_FR },
        { "FR 16x64", cpu_FRD },
        { "BR 16x1", cpu_BR },
        { "BR4 4x4", cpu_BR4 },
        { "BR8 2x8", cpu_BR8 },
    };
    char *key;
    void **regfile;

    /* Build the lookup table lazily on first use. */
    if (xtensa_regfile_table == NULL) {
        unsigned i;

        xtensa_regfile_table = g_hash_table_new(g_str_hash, g_str_equal);
        for (i = 0; i < ARRAY_SIZE(mapping); ++i) {
            g_hash_table_insert(xtensa_regfile_table,
                                (void *)mapping[i].geometry,
                                mapping[i].regfile);
        }
    }

    key = g_strdup_printf("%s %dx%d", name, entries, bits);
    regfile = (void **)g_hash_table_lookup(xtensa_regfile_table, key);
    g_free(key);
    return regfile;
}
278
279 static inline bool option_enabled(DisasContext *dc, int opt)
280 {
281 return xtensa_option_enabled(dc->config, opt);
282 }
283
284 static void init_sar_tracker(DisasContext *dc)
285 {
286 dc->sar_5bit = false;
287 dc->sar_m32_5bit = false;
288 dc->sar_m32_allocated = false;
289 }
290
291 static void reset_sar_tracker(DisasContext *dc)
292 {
293 if (dc->sar_m32_allocated) {
294 tcg_temp_free(dc->sar_m32);
295 }
296 }
297
298 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
299 {
300 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
301 if (dc->sar_m32_5bit) {
302 tcg_gen_discard_i32(dc->sar_m32);
303 }
304 dc->sar_5bit = true;
305 dc->sar_m32_5bit = false;
306 }
307
308 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
309 {
310 TCGv_i32 tmp = tcg_const_i32(32);
311 if (!dc->sar_m32_allocated) {
312 dc->sar_m32 = tcg_temp_local_new_i32();
313 dc->sar_m32_allocated = true;
314 }
315 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
316 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
317 dc->sar_5bit = false;
318 dc->sar_m32_5bit = true;
319 tcg_temp_free(tmp);
320 }
321
322 static void gen_exception(DisasContext *dc, int excp)
323 {
324 TCGv_i32 tmp = tcg_const_i32(excp);
325 gen_helper_exception(cpu_env, tmp);
326 tcg_temp_free(tmp);
327 }
328
329 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
330 {
331 TCGv_i32 tpc = tcg_const_i32(dc->pc);
332 TCGv_i32 tcause = tcg_const_i32(cause);
333 gen_helper_exception_cause(cpu_env, tpc, tcause);
334 tcg_temp_free(tpc);
335 tcg_temp_free(tcause);
336 if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
337 cause == SYSCALL_CAUSE) {
338 dc->base.is_jmp = DISAS_NORETURN;
339 }
340 }
341
342 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
343 TCGv_i32 vaddr)
344 {
345 TCGv_i32 tpc = tcg_const_i32(dc->pc);
346 TCGv_i32 tcause = tcg_const_i32(cause);
347 gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
348 tcg_temp_free(tpc);
349 tcg_temp_free(tcause);
350 }
351
352 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
353 {
354 TCGv_i32 tpc = tcg_const_i32(dc->pc);
355 TCGv_i32 tcause = tcg_const_i32(cause);
356 gen_helper_debug_exception(cpu_env, tpc, tcause);
357 tcg_temp_free(tpc);
358 tcg_temp_free(tcause);
359 if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
360 dc->base.is_jmp = DISAS_NORETURN;
361 }
362 }
363
/*
 * Allow the current opcode only in the kernel ring.  Returns true when
 * translation may continue; otherwise emits a PRIVILEGED_CAUSE exception
 * and ends the TB.
 */
static bool gen_check_privilege(DisasContext *dc)
{
#ifndef CONFIG_USER_ONLY
    /* cring == 0: kernel ring (or PS.EXCM forced it) — access permitted. */
    if (!dc->cring) {
        return true;
    }
#endif
    /* User-only builds and non-zero rings always trap on privileged ops. */
    gen_exception_cause(dc, PRIVILEGED_CAUSE);
    dc->base.is_jmp = DISAS_NORETURN;
    return false;
}
375
376 static bool gen_check_cpenable(DisasContext *dc, uint32_t cp_mask)
377 {
378 cp_mask &= ~dc->cpenable;
379
380 if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) && cp_mask) {
381 gen_exception_cause(dc, COPROCESSOR0_DISABLED + ctz32(cp_mask));
382 dc->base.is_jmp = DISAS_NORETURN;
383 return false;
384 }
385 return true;
386 }
387
388 static int gen_postprocess(DisasContext *dc, int slot);
389
/*
 * Emit the end-of-TB jump to @dest.  @slot >= 0 requests TB chaining via
 * goto_tb with that slot index; @slot < 0 forces a plain exit (indirect
 * or cross-page target).  Always ends the TB.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    /* Commit the ICOUNT value computed for the next instruction. */
    if (dc->icount) {
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }
    if (dc->base.singlestep_enabled) {
        /* Single-stepping: return to the debugger instead of chaining. */
        gen_exception(dc, EXCP_DEBUG);
    } else {
        /* Postprocessing may veto chaining (e.g. mmu/flags change). */
        if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
            slot = gen_postprocess(dc, slot);
        }
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb(dc->base.tb, slot);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
411
412 static void gen_jump(DisasContext *dc, TCGv dest)
413 {
414 gen_jump_slot(dc, dest, -1);
415 }
416
417 static int adjust_jump_slot(DisasContext *dc, uint32_t dest, int slot)
418 {
419 if (((dc->base.pc_first ^ dest) & TARGET_PAGE_MASK) != 0) {
420 return -1;
421 } else {
422 return slot;
423 }
424 }
425
426 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
427 {
428 TCGv_i32 tmp = tcg_const_i32(dest);
429 gen_jump_slot(dc, tmp, adjust_jump_slot(dc, dest, slot));
430 tcg_temp_free(tmp);
431 }
432
433 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
434 int slot)
435 {
436 TCGv_i32 tcallinc = tcg_const_i32(callinc);
437
438 tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
439 tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
440 tcg_temp_free(tcallinc);
441 tcg_gen_movi_i32(cpu_R[callinc << 2],
442 (callinc << 30) | (dc->base.pc_next & 0x3fffffff));
443 gen_jump_slot(dc, dest, slot);
444 }
445
/*
 * Zero-overhead loop support: if the next PC is the loop end (LEND),
 * emit the decrement-and-loop-back sequence and return true (the TB is
 * finished).  Returns false when the insn is not at the loop end.
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (dc->base.pc_next == dc->lend) {
        TCGLabel *label = gen_new_label();

        /* LCOUNT == 0: loop is done, fall through past the loop-back. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        if (dc->lbeg_off) {
            /* LBEG is known statically: direct (chainable) jump. */
            gen_jumpi(dc, dc->base.pc_next - dc->lbeg_off, slot);
        } else {
            /* LBEG only known at run time: indirect jump via the SR. */
            gen_jump(dc, cpu_SR[LBEG]);
        }
        gen_set_label(label);
        gen_jumpi(dc, dc->base.pc_next, -1);
        return true;
    }
    return false;
}
464
465 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
466 {
467 if (!gen_check_loop_end(dc, slot)) {
468 gen_jumpi(dc, dc->base.pc_next, slot);
469 }
470 }
471
472 static void gen_brcond(DisasContext *dc, TCGCond cond,
473 TCGv_i32 t0, TCGv_i32 t1, uint32_t addr)
474 {
475 TCGLabel *label = gen_new_label();
476
477 tcg_gen_brcond_i32(cond, t0, t1, label);
478 gen_jumpi_check_loop_end(dc, 0);
479 gen_set_label(label);
480 gen_jumpi(dc, addr, 1);
481 }
482
483 static void gen_brcondi(DisasContext *dc, TCGCond cond,
484 TCGv_i32 t0, uint32_t t1, uint32_t addr)
485 {
486 TCGv_i32 tmp = tcg_const_i32(t1);
487 gen_brcond(dc, cond, t0, tmp, addr);
488 tcg_temp_free(tmp);
489 }
490
491 static uint32_t test_exceptions_sr(DisasContext *dc, const OpcodeArg arg[],
492 const uint32_t par[])
493 {
494 return xtensa_option_enabled(dc->config, par[1]) ? 0 : XTENSA_OP_ILL;
495 }
496
497 static uint32_t test_exceptions_ccompare(DisasContext *dc,
498 const OpcodeArg arg[],
499 const uint32_t par[])
500 {
501 unsigned n = par[0] - CCOMPARE;
502
503 if (n >= dc->config->nccompare) {
504 return XTENSA_OP_ILL;
505 }
506 return test_exceptions_sr(dc, arg, par);
507 }
508
509 static uint32_t test_exceptions_dbreak(DisasContext *dc, const OpcodeArg arg[],
510 const uint32_t par[])
511 {
512 unsigned n = MAX_NDBREAK;
513
514 if (par[0] >= DBREAKA && par[0] < DBREAKA + MAX_NDBREAK) {
515 n = par[0] - DBREAKA;
516 }
517 if (par[0] >= DBREAKC && par[0] < DBREAKC + MAX_NDBREAK) {
518 n = par[0] - DBREAKC;
519 }
520 if (n >= dc->config->ndbreak) {
521 return XTENSA_OP_ILL;
522 }
523 return test_exceptions_sr(dc, arg, par);
524 }
525
526 static uint32_t test_exceptions_ibreak(DisasContext *dc, const OpcodeArg arg[],
527 const uint32_t par[])
528 {
529 unsigned n = par[0] - IBREAKA;
530
531 if (n >= dc->config->nibreak) {
532 return XTENSA_OP_ILL;
533 }
534 return test_exceptions_sr(dc, arg, par);
535 }
536
537 static uint32_t test_exceptions_hpi(DisasContext *dc, const OpcodeArg arg[],
538 const uint32_t par[])
539 {
540 unsigned n = MAX_NLEVEL + 1;
541
542 if (par[0] >= EXCSAVE1 && par[0] < EXCSAVE1 + MAX_NLEVEL) {
543 n = par[0] - EXCSAVE1 + 1;
544 }
545 if (par[0] >= EPC1 && par[0] < EPC1 + MAX_NLEVEL) {
546 n = par[0] - EPC1 + 1;
547 }
548 if (par[0] >= EPS2 && par[0] < EPS2 + MAX_NLEVEL - 1) {
549 n = par[0] - EPS2 + 2;
550 }
551 if (n > dc->config->nlevel) {
552 return XTENSA_OP_ILL;
553 }
554 return test_exceptions_sr(dc, arg, par);
555 }
556
557 static void gen_load_store_alignment(DisasContext *dc, int shift,
558 TCGv_i32 addr, bool no_hw_alignment)
559 {
560 if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
561 tcg_gen_andi_i32(addr, addr, ~0 << shift);
562 } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
563 no_hw_alignment) {
564 TCGLabel *label = gen_new_label();
565 TCGv_i32 tmp = tcg_temp_new_i32();
566 tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
567 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
568 gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
569 gen_set_label(label);
570 tcg_temp_free(tmp);
571 }
572 }
573
574 #ifndef CONFIG_USER_ONLY
575 static void gen_waiti(DisasContext *dc, uint32_t imm4)
576 {
577 TCGv_i32 pc = tcg_const_i32(dc->base.pc_next);
578 TCGv_i32 intlevel = tcg_const_i32(imm4);
579
580 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
581 gen_io_start();
582 }
583 gen_helper_waiti(cpu_env, pc, intlevel);
584 tcg_temp_free(pc);
585 tcg_temp_free(intlevel);
586 }
587 #endif
588
589 static bool gen_window_check(DisasContext *dc, uint32_t mask)
590 {
591 unsigned r = 31 - clz32(mask);
592
593 if (r / 4 > dc->window) {
594 TCGv_i32 pc = tcg_const_i32(dc->pc);
595 TCGv_i32 w = tcg_const_i32(r / 4);
596
597 gen_helper_window_check(cpu_env, pc, w);
598 dc->base.is_jmp = DISAS_NORETURN;
599 return false;
600 }
601 return true;
602 }
603
604 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
605 {
606 TCGv_i32 m = tcg_temp_new_i32();
607
608 if (hi) {
609 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
610 } else {
611 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
612 }
613 return m;
614 }
615
616 static void gen_zero_check(DisasContext *dc, const OpcodeArg arg[])
617 {
618 TCGLabel *label = gen_new_label();
619
620 tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0, label);
621 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
622 gen_set_label(label);
623 }
624
625 static inline unsigned xtensa_op0_insn_len(DisasContext *dc, uint8_t op0)
626 {
627 return xtensa_isa_length_from_chars(dc->config->isa, &op0);
628 }
629
/*
 * Run end-of-instruction bookkeeping requested via dc->op_flags:
 * interrupt re-check, register window sync, and forced TB exit.
 * Returns the (possibly vetoed) goto_tb slot to use.
 */
static int gen_postprocess(DisasContext *dc, int slot)
{
    uint32_t op_flags = dc->op_flags;

#ifndef CONFIG_USER_ONLY
    if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
        /* The helper may raise an interrupt: flag the icount I/O boundary. */
        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_check_interrupts(cpu_env);
    }
#endif
    if (op_flags & XTENSA_OP_SYNC_REGISTER_WINDOW) {
        gen_helper_sync_windowbase(cpu_env);
    }
    if (op_flags & XTENSA_OP_EXIT_TB_M1) {
        /* Veto TB chaining: state changed in a way that invalidates it. */
        slot = -1;
    }
    return slot;
}
650
/*
 * Record of an operand whose input must be snapshotted into a temp so a
 * sibling slot may overwrite the original register (see break_dependency).
 */
struct opcode_arg_copy {
    uint32_t resource;  /* encoded resource key (see encode_resource) */
    void *temp;         /* TCGv_i32 or TCGv_i64 holding the snapshot */
    OpcodeArg *arg;     /* the argument whose ->in gets redirected */
};
656
/* One input or output resource of a slot's opcode. */
struct opcode_arg_info {
    uint32_t resource;  /* encoded resource key (see encode_resource) */
    int index;          /* visible-operand index into arg[], or -1 */
};
661
/* Everything known about one slot of a (possibly multi-slot) insn. */
struct slot_prop {
    XtensaOpcodeOps *ops;
    OpcodeArg arg[MAX_OPCODE_ARGS];
    /* in[]/out[] are sorted by resource before dependency analysis. */
    struct opcode_arg_info in[MAX_OPCODE_ARGS];
    struct opcode_arg_info out[MAX_OPCODE_ARGS];
    unsigned n_in;
    unsigned n_out;
    uint32_t op_flags;  /* LOAD_STORE / CONTROL_FLOW bits for ordering */
};
671
/* Kinds of resources a slot may read or write. */
enum resource_type {
    RES_REGFILE,  /* a register in some register file */
    RES_STATE,    /* a piece of processor state */
    RES_MAX,
};
677
678 static uint32_t encode_resource(enum resource_type r, unsigned g, unsigned n)
679 {
680 assert(r < RES_MAX && g < 256 && n < 65536);
681 return (r << 24) | (g << 16) | n;
682 }
683
684 static enum resource_type get_resource_type(uint32_t resource)
685 {
686 return resource >> 24;
687 }
688
689 /*
690 * a depends on b if b must be executed before a,
691 * because a's side effects will destroy b's inputs.
692 */
693 static bool op_depends_on(const struct slot_prop *a,
694 const struct slot_prop *b)
695 {
696 unsigned i = 0;
697 unsigned j = 0;
698
699 if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
700 return true;
701 }
702 if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
703 (b->op_flags & XTENSA_OP_LOAD_STORE)) {
704 return true;
705 }
706 while (i < a->n_out && j < b->n_in) {
707 if (a->out[i].resource < b->in[j].resource) {
708 ++i;
709 } else if (a->out[i].resource > b->in[j].resource) {
710 ++j;
711 } else {
712 return true;
713 }
714 }
715 return false;
716 }
717
718 /*
719 * Try to break a dependency on b, append temporary register copy records
720 * to the end of copy and update n_copy in case of success.
721 * This is not always possible: e.g. control flow must always be the last,
722 * load/store must be first and state dependencies are not supported yet.
723 */
static bool break_dependency(struct slot_prop *a,
                             struct slot_prop *b,
                             struct opcode_arg_copy *copy,
                             unsigned *n_copy)
{
    unsigned i = 0;       /* index into a->out (sorted by resource) */
    unsigned j = 0;       /* index into b->in (sorted by resource) */
    unsigned n = *n_copy; /* local cursor; published only on return */
    bool rv = false;      /* true once at least one copy was recorded */

    /* Control flow must stay last: cannot be hoisted past anything. */
    if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
        return false;
    }
    /* Load/store must stay first relative to non-load/store. */
    if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
        (b->op_flags & XTENSA_OP_LOAD_STORE)) {
        return false;
    }
    /* Merge-scan the sorted lists; each collision becomes a copy record. */
    while (i < a->n_out && j < b->n_in) {
        if (a->out[i].resource < b->in[j].resource) {
            ++i;
        } else if (a->out[i].resource > b->in[j].resource) {
            ++j;
        } else {
            int index = b->in[j].index;

            /* Only regfile inputs of visible operands can be snapshotted. */
            if (get_resource_type(a->out[i].resource) != RES_REGFILE ||
                index < 0) {
                return false;
            }
            copy[n].resource = b->in[j].resource;
            copy[n].arg = b->arg + index;
            ++n;
            ++j;
            rv = true;
        }
    }
    *n_copy = n;
    return rv;
}
763
764 /*
765 * Calculate evaluation order for slot opcodes.
766 * Build opcode order graph and output its nodes in topological sort order.
767 * An edge a -> b in the graph means that opcode a must be followed by
768 * opcode b.
769 */
static bool tsort(struct slot_prop *slot,
                  struct slot_prop *sorted[],
                  unsigned n,
                  struct opcode_arg_copy *copy,
                  unsigned *n_copy)
{
    struct tsnode {
        unsigned n_in_edge;                 /* remaining predecessors */
        unsigned n_out_edge;                /* successors in out_edge[] */
        unsigned out_edge[MAX_INSN_SLOTS];
    } node[MAX_INSN_SLOTS];

    unsigned in[MAX_INSN_SLOTS];  /* work queue of ready (in-degree 0) nodes */
    unsigned i, j;
    unsigned n_in = 0;            /* number of entries queued in in[] */
    unsigned n_out = 0;           /* number of entries emitted to sorted[] */
    unsigned n_edge = 0;          /* edges not yet consumed */
    unsigned in_idx = 0;          /* next in[] entry to process */
    unsigned node_idx = 0;        /* resume point for dependency breaking */

    for (i = 0; i < n; ++i) {
        node[i].n_in_edge = 0;
        node[i].n_out_edge = 0;
    }

    /* Build the graph: edge i -> j when slot j depends on slot i. */
    for (i = 0; i < n; ++i) {
        unsigned n_out_edge = 0;

        for (j = 0; j < n; ++j) {
            if (i != j && op_depends_on(slot + j, slot + i)) {
                node[i].out_edge[n_out_edge] = j;
                ++node[j].n_in_edge;
                ++n_out_edge;
                ++n_edge;
            }
        }
        node[i].n_out_edge = n_out_edge;
    }

    /* Seed the queue with nodes that have no predecessors. */
    for (i = 0; i < n; ++i) {
        if (!node[i].n_in_edge) {
            in[n_in] = i;
            ++n_in;
        }
    }

again:
    /* Kahn's algorithm: emit ready nodes, releasing their successors. */
    for (; in_idx < n_in; ++in_idx) {
        i = in[in_idx];
        sorted[n_out] = slot + i;
        ++n_out;
        for (j = 0; j < node[i].n_out_edge; ++j) {
            --n_edge;
            if (--node[node[i].out_edge[j]].n_in_edge == 0) {
                in[n_in] = node[i].out_edge[j];
                ++n_in;
            }
        }
    }
    /*
     * Edges left means a cycle: try to break one dependency by copying
     * an input to a temp, then resume the sort from where it stalled.
     */
    if (n_edge) {
        for (; node_idx < n; ++node_idx) {
            struct tsnode *cnode = node + node_idx;

            if (cnode->n_in_edge) {
                for (j = 0; j < cnode->n_out_edge; ++j) {
                    unsigned k = cnode->out_edge[j];

                    if (break_dependency(slot + k, slot + node_idx,
                                         copy, n_copy) &&
                        --node[k].n_in_edge == 0) {
                        /* k became ready; drop the edge (swap-remove). */
                        in[n_in] = k;
                        ++n_in;
                        --n_edge;
                        cnode->out_edge[j] =
                            cnode->out_edge[cnode->n_out_edge - 1];
                        --cnode->n_out_edge;
                        goto again;
                    }
                }
            }
        }
    }
    /* Success iff every edge was consumed (no unbreakable cycle). */
    return n_edge == 0;
}
854
/*
 * Register @resource as an input ('i'), output ('o') or both ('m') of
 * @op.  @index is the visible-operand index into op->arg[], or -1 when
 * the resource has no visible operand.
 */
static void opcode_add_resource(struct slot_prop *op,
                                uint32_t resource, char direction,
                                int index)
{
    switch (direction) {
    case 'm':
    case 'i':
        assert(op->n_in < ARRAY_SIZE(op->in));
        op->in[op->n_in].resource = resource;
        op->in[op->n_in].index = index;
        ++op->n_in;
        /* fall through: 'm' (in/out) also records an output below */
    case 'o':
        if (direction == 'm' || direction == 'o') {
            assert(op->n_out < ARRAY_SIZE(op->out));
            op->out[op->n_out].resource = resource;
            op->out[op->n_out].index = index;
            ++op->n_out;
        }
        break;
    default:
        g_assert_not_reached();
    }
}
879
880 static int resource_compare(const void *a, const void *b)
881 {
882 const struct opcode_arg_info *pa = a;
883 const struct opcode_arg_info *pb = b;
884
885 return pa->resource < pb->resource ?
886 -1 : (pa->resource > pb->resource ? 1 : 0);
887 }
888
889 static int arg_copy_compare(const void *a, const void *b)
890 {
891 const struct opcode_arg_copy *pa = a;
892 const struct opcode_arg_copy *pb = b;
893
894 return pa->resource < pb->resource ?
895 -1 : (pa->resource > pb->resource ? 1 : 0);
896 }
897
/*
 * Decode and translate one guest instruction (possibly a multi-slot
 * VLIW bundle).  Phases: read/decode bytes, decode each slot's opcode
 * and operands, order slots by resource dependencies, run the common
 * per-insn checks (privilege, syscall, debug, window, coprocessor),
 * snapshot clobbered inputs, then invoke each slot's translator.
 */
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
{
    xtensa_isa isa = dc->config->isa;
    unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)};
    unsigned len = xtensa_op0_insn_len(dc, b[0]);
    xtensa_format fmt;
    int slot, slots;
    unsigned i;
    uint32_t op_flags = 0;          /* union of all slots' flags */
    struct slot_prop slot_prop[MAX_INSN_SLOTS];
    struct slot_prop *ordered[MAX_INSN_SLOTS];
    struct opcode_arg_copy arg_copy[MAX_INSN_SLOTS * MAX_OPCODE_ARGS];
    unsigned n_arg_copy = 0;
    uint32_t debug_cause = 0;
    uint32_t windowed_register = 0; /* bitmask of referenced AR registers */
    uint32_t coprocessor = 0;       /* bitmask of referenced coprocessors */

    if (len == XTENSA_UNDEFINED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unknown instruction length (pc = %08x)\n",
                      dc->pc);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return;
    }

    /* Fetch the remaining bytes and decode the bundle format. */
    dc->base.pc_next = dc->pc + len;
    for (i = 1; i < len; ++i) {
        b[i] = translator_ldub(env, dc->pc + i);
    }
    xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
    fmt = xtensa_format_decode(isa, dc->insnbuf);
    if (fmt == XTENSA_UNDEFINED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unrecognized instruction format (pc = %08x)\n",
                      dc->pc);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return;
    }
    slots = xtensa_format_num_slots(isa, fmt);
    for (slot = 0; slot < slots; ++slot) {
        xtensa_opcode opc;
        int opnd, vopnd, opnds;
        OpcodeArg *arg = slot_prop[slot].arg;
        XtensaOpcodeOps *ops;

        xtensa_format_get_slot(isa, fmt, slot, dc->insnbuf, dc->slotbuf);
        opc = xtensa_opcode_decode(isa, fmt, slot, dc->slotbuf);
        if (opc == XTENSA_UNDEFINED) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "unrecognized opcode in slot %d (pc = %08x)\n",
                          slot, dc->pc);
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
        opnds = xtensa_opcode_num_operands(isa, opc);

        /* Decode the operands; vopnd counts only the visible ones. */
        for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
            void **register_file = NULL;
            xtensa_regfile rf;

            if (xtensa_operand_is_register(isa, opc, opnd)) {
                rf = xtensa_operand_regfile(isa, opc, opnd);
                register_file = dc->config->regfile[rf];

                /* AR references feed the window-overflow check below. */
                if (rf == dc->config->a_regfile) {
                    uint32_t v;

                    xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                             dc->slotbuf, &v);
                    xtensa_operand_decode(isa, opc, opnd, &v);
                    windowed_register |= 1u << v;
                }
            }
            if (xtensa_operand_is_visible(isa, opc, opnd)) {
                uint32_t v;

                xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                         dc->slotbuf, &v);
                xtensa_operand_decode(isa, opc, opnd, &v);
                arg[vopnd].raw_imm = v;
                if (xtensa_operand_is_PCrelative(isa, opc, opnd)) {
                    xtensa_operand_undo_reloc(isa, opc, opnd, &v, dc->pc);
                }
                arg[vopnd].imm = v;
                if (register_file) {
                    arg[vopnd].in = register_file[v];
                    arg[vopnd].out = register_file[v];
                    arg[vopnd].num_bits = xtensa_regfile_num_bits(isa, rf);
                } else {
                    arg[vopnd].num_bits = 32;
                }
                ++vopnd;
            }
        }
        ops = dc->config->opcode_ops[opc];
        slot_prop[slot].ops = ops;

        if (ops) {
            op_flags |= ops->op_flags;
            if (ops->test_exceptions) {
                op_flags |= ops->test_exceptions(dc, arg, ops->par);
            }
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented opcode '%s' in slot %d (pc = %08x)\n",
                          xtensa_opcode_name(isa, opc), slot, dc->pc);
            op_flags |= XTENSA_OP_ILL;
        }
        /* ops is non-NULL past this point: ILL would have been set. */
        if (op_flags & XTENSA_OP_ILL) {
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
        if (op_flags & XTENSA_OP_DEBUG_BREAK) {
            debug_cause |= ops->par[0];
        }
        if (ops->test_overflow) {
            windowed_register |= ops->test_overflow(dc, arg, ops->par);
        }
        coprocessor |= ops->coprocessor;

        /* Multi-slot bundles need resource lists for dependency ordering. */
        if (slots > 1) {
            slot_prop[slot].n_in = 0;
            slot_prop[slot].n_out = 0;
            slot_prop[slot].op_flags = ops->op_flags & XTENSA_OP_LOAD_STORE;

            opnds = xtensa_opcode_num_operands(isa, opc);

            for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
                bool visible = xtensa_operand_is_visible(isa, opc, opnd);

                if (xtensa_operand_is_register(isa, opc, opnd)) {
                    xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);
                    uint32_t v = 0;

                    xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                             dc->slotbuf, &v);
                    xtensa_operand_decode(isa, opc, opnd, &v);
                    opcode_add_resource(slot_prop + slot,
                                        encode_resource(RES_REGFILE, rf, v),
                                        xtensa_operand_inout(isa, opc, opnd),
                                        visible ? vopnd : -1);
                }
                if (visible) {
                    ++vopnd;
                }
            }

            opnds = xtensa_opcode_num_stateOperands(isa, opc);

            for (opnd = 0; opnd < opnds; ++opnd) {
                xtensa_state state = xtensa_stateOperand_state(isa, opc, opnd);

                opcode_add_resource(slot_prop + slot,
                                    encode_resource(RES_STATE, 0, state),
                                    xtensa_stateOperand_inout(isa, opc, opnd),
                                    -1);
            }
            if (xtensa_opcode_is_branch(isa, opc) ||
                xtensa_opcode_is_jump(isa, opc) ||
                xtensa_opcode_is_loop(isa, opc) ||
                xtensa_opcode_is_call(isa, opc)) {
                slot_prop[slot].op_flags |= XTENSA_OP_CONTROL_FLOW;
            }

            /* Sorted lists enable the merge scans in op_depends_on(). */
            qsort(slot_prop[slot].in, slot_prop[slot].n_in,
                  sizeof(slot_prop[slot].in[0]), resource_compare);
            qsort(slot_prop[slot].out, slot_prop[slot].n_out,
                  sizeof(slot_prop[slot].out[0]), resource_compare);
        }
    }

    /* Order slots so every producer runs before its consumers. */
    if (slots > 1) {
        if (!tsort(slot_prop, ordered, slots, arg_copy, &n_arg_copy)) {
            qemu_log_mask(LOG_UNIMP,
                          "Circular resource dependencies (pc = %08x)\n",
                          dc->pc);
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
    } else {
        ordered[0] = slot_prop + 0;
    }

    /* Common per-insn checks, in architectural priority order. */
    if ((op_flags & XTENSA_OP_PRIVILEGED) &&
        !gen_check_privilege(dc)) {
        return;
    }

    if (op_flags & XTENSA_OP_SYSCALL) {
        gen_exception_cause(dc, SYSCALL_CAUSE);
        return;
    }

    if ((op_flags & XTENSA_OP_DEBUG_BREAK) && dc->debug) {
        gen_debug_exception(dc, debug_cause);
        return;
    }

    if (windowed_register && !gen_window_check(dc, windowed_register)) {
        return;
    }

    if (op_flags & XTENSA_OP_UNDERFLOW) {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_test_underflow_retw(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    if (op_flags & XTENSA_OP_ALLOCA) {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_movsp(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    if (coprocessor && !gen_check_cpenable(dc, coprocessor)) {
        return;
    }

    /*
     * Snapshot inputs recorded by break_dependency() into temps and
     * redirect the consuming args; duplicates (same resource) share one
     * temp, and the array is compacted to the unique entries.
     */
    if (n_arg_copy) {
        uint32_t resource;
        void *temp;
        unsigned j;

        qsort(arg_copy, n_arg_copy, sizeof(*arg_copy), arg_copy_compare);
        for (i = j = 0; i < n_arg_copy; ++i) {
            if (i == 0 || arg_copy[i].resource != resource) {
                resource = arg_copy[i].resource;
                if (arg_copy[i].arg->num_bits <= 32) {
                    temp = tcg_temp_local_new_i32();
                    tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
                } else if (arg_copy[i].arg->num_bits <= 64) {
                    temp = tcg_temp_local_new_i64();
                    tcg_gen_mov_i64(temp, arg_copy[i].arg->in);
                } else {
                    g_assert_not_reached();
                }
                arg_copy[i].temp = temp;

                if (i != j) {
                    arg_copy[j] = arg_copy[i];
                }
                ++j;
            }
            arg_copy[i].arg->in = temp;
        }
        n_arg_copy = j;
    }

    if (op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
        for (slot = 0; slot < slots; ++slot) {
            if (slot_prop[slot].ops->op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
                gen_zero_check(dc, slot_prop[slot].arg);
            }
        }
    }

    dc->op_flags = op_flags;

    /* Translate each slot in dependency order. */
    for (slot = 0; slot < slots; ++slot) {
        struct slot_prop *pslot = ordered[slot];
        XtensaOpcodeOps *ops = pslot->ops;

        ops->translate(dc, pslot->arg, ops->par);
    }

    for (i = 0; i < n_arg_copy; ++i) {
        if (arg_copy[i].arg->num_bits <= 32) {
            tcg_temp_free_i32(arg_copy[i].temp);
        } else if (arg_copy[i].arg->num_bits <= 64) {
            tcg_temp_free_i64(arg_copy[i].temp);
        } else {
            g_assert_not_reached();
        }
    }

    if (dc->base.is_jmp == DISAS_NEXT) {
        gen_postprocess(dc, 0);
        dc->op_flags = 0;
        if (op_flags & XTENSA_OP_EXIT_TB_M1) {
            /* Change in mmu index, memory mapping or tb->flags; exit tb */
            gen_jumpi_check_loop_end(dc, -1);
        } else if (op_flags & XTENSA_OP_EXIT_TB_0) {
            gen_jumpi_check_loop_end(dc, 0);
        } else {
            gen_check_loop_end(dc, 0);
        }
    }
    dc->pc = dc->base.pc_next;
}
1189
/*
 * Return the length in bytes of the instruction at dc->pc by fetching
 * its first byte and decoding the op0 field.
 */
static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
{
    uint8_t b0 = cpu_ldub_code(env, dc->pc);
    return xtensa_op0_insn_len(dc, b0);
}
1195
/*
 * Raise a debug exception (cause IB) if any enabled hardware instruction
 * breakpoint (IBREAKA[i], gated by the corresponding IBREAKENABLE bit)
 * matches the current PC.  At most one exception is generated.
 */
static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
{
    unsigned i;

    for (i = 0; i < dc->config->nibreak; ++i) {
        if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
            env->sregs[IBREAKA + i] == dc->pc) {
            gen_debug_exception(dc, DEBUGCAUSE_IB);
            break;
        }
    }
}
1208
/*
 * TranslatorOps hook: initialize the per-TB DisasContext from the CPU
 * configuration and the flags/cs_base that were baked into the TB key.
 */
static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
                                         CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUXtensaState *env = cpu->env_ptr;
    uint32_t tb_flags = dc->base.tb->flags;

    dc->config = env->config;
    dc->pc = dc->base.pc_first;
    dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK;
    /* In exception mode memory accesses use ring 0 regardless of PS.RING */
    dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring;
    /* Loop begin/end are encoded in cs_base relative to the TB's page */
    dc->lbeg_off = (dc->base.tb->cs_base & XTENSA_CSBASE_LBEG_OFF_MASK) >>
        XTENSA_CSBASE_LBEG_OFF_SHIFT;
    dc->lend = (dc->base.tb->cs_base & XTENSA_CSBASE_LEND_MASK) +
        (dc->base.pc_first & TARGET_PAGE_MASK);
    dc->debug = tb_flags & XTENSA_TBFLAG_DEBUG;
    dc->icount = tb_flags & XTENSA_TBFLAG_ICOUNT;
    dc->cpenable = (tb_flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
        XTENSA_TBFLAG_CPENABLE_SHIFT;
    dc->window = ((tb_flags & XTENSA_TBFLAG_WINDOW_MASK) >>
                 XTENSA_TBFLAG_WINDOW_SHIFT);
    dc->cwoe = tb_flags & XTENSA_TBFLAG_CWOE;
    dc->callinc = ((tb_flags & XTENSA_TBFLAG_CALLINC_MASK) >>
                   XTENSA_TBFLAG_CALLINC_SHIFT);
    init_sar_tracker(dc);
}
1235
/*
 * TranslatorOps hook: allocate the temp that tracks the next ICOUNT value
 * across the TB when instruction-count debugging is active.
 */
static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->icount) {
        /* Local temp: must survive the branches emitted per instruction */
        dc->next_icount = tcg_temp_local_new_i32();
    }
}

/* TranslatorOps hook: record the guest PC for this instruction. */
static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
1249
/*
 * TranslatorOps hook: emit an EXCP_DEBUG exception for a guest breakpoint
 * at the current PC and terminate the TB.
 */
static bool xtensa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                       const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing. */
    dc->base.pc_next += 2;
    return true;
}
1265
/*
 * TranslatorOps hook: translate a single instruction.  Handles the
 * per-instruction debug machinery (ICOUNT increment, IBREAK match) around
 * the actual decode in disas_xtensa_insn(), and ends the TB before an
 * instruction that would cross a page boundary.
 */
static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUXtensaState *env = cpu->env_ptr;
    target_ulong page_start;

    /* These two conditions only apply to the first insn in the TB,
       but this is the first TranslateOps hook that allows exiting. */
    if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
        && (dc->base.tb->flags & XTENSA_TBFLAG_YIELD)) {
        gen_exception(dc, EXCP_YIELD);
        dc->base.is_jmp = DISAS_NORETURN;
        return;
    }
    if (dc->base.tb->flags & XTENSA_TBFLAG_EXCEPTION) {
        gen_exception(dc, EXCP_DEBUG);
        dc->base.is_jmp = DISAS_NORETURN;
        return;
    }

    if (dc->icount) {
        TCGLabel *label = gen_new_label();

        /* Pre-compute ICOUNT + 1; on wrap to 0 keep ICOUNT and raise the
           instruction-count debug exception (cause IC) when debugging. */
        tcg_gen_addi_i32(dc->next_icount, cpu_SR[ICOUNT], 1);
        tcg_gen_brcondi_i32(TCG_COND_NE, dc->next_icount, 0, label);
        tcg_gen_mov_i32(dc->next_icount, cpu_SR[ICOUNT]);
        if (dc->debug) {
            gen_debug_exception(dc, DEBUGCAUSE_IC);
        }
        gen_set_label(label);
    }

    if (dc->debug) {
        gen_ibreak_check(env, dc);
    }

    disas_xtensa_insn(env, dc);

    /* Commit the ICOUNT value computed above */
    if (dc->icount) {
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }

    /* End the TB if the next insn will cross into the next page. */
    page_start = dc->base.pc_first & TARGET_PAGE_MASK;
    if (dc->base.is_jmp == DISAS_NEXT &&
        (dc->pc - page_start >= TARGET_PAGE_SIZE ||
         dc->pc - page_start + xtensa_insn_len(env, dc) > TARGET_PAGE_SIZE)) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
1316
/*
 * TranslatorOps hook: finish the TB.  Releases translation-time resources
 * and emits the TB epilogue according to how translation ended.
 */
static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    reset_sar_tracker(dc);
    if (dc->icount) {
        tcg_temp_free(dc->next_icount);
    }

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* An exception or jump already ended the TB; nothing to emit */
        break;
    case DISAS_TOO_MANY:
        if (dc->base.singlestep_enabled) {
            /* Stop after each insn when single-stepping in the debugger */
            tcg_gen_movi_i32(cpu_pc, dc->pc);
            gen_exception(dc, EXCP_DEBUG);
        } else {
            gen_jumpi(dc, dc->pc, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
1341
/* TranslatorOps hook: log the disassembly of the translated TB. */
static void xtensa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

/* Hook table consumed by the generic translator loop. */
static const TranslatorOps xtensa_translator_ops = {
    .init_disas_context = xtensa_tr_init_disas_context,
    .tb_start           = xtensa_tr_tb_start,
    .insn_start         = xtensa_tr_insn_start,
    .breakpoint_check   = xtensa_tr_breakpoint_check,
    .translate_insn     = xtensa_tr_translate_insn,
    .tb_stop            = xtensa_tr_tb_stop,
    .disas_log          = xtensa_tr_disas_log,
};

/* Target entry point: translate one TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = {};
    translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
}
1363
/*
 * Dump CPU state for the monitor/log: PC, all system/user registers known
 * to the ISA library, the current A registers, the physical AR register
 * file with window markers, and (optionally) the FP register file.
 */
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    xtensa_isa isa = env->config->isa;
    int i, j;

    qemu_fprintf(f, "PC=%08x\n\n", env->pc);

    /* j counts printed registers so the output wraps every 4 columns */
    for (i = j = 0; i < xtensa_isa_num_sysregs(isa); ++i) {
        const uint32_t *reg =
            xtensa_sysreg_is_user(isa, i) ? env->uregs : env->sregs;
        int regno = xtensa_sysreg_number(isa, i);

        if (regno >= 0) {
            qemu_fprintf(f, "%12s=%08x%c",
                         xtensa_sysreg_name(isa, i),
                         reg[regno],
                         (j++ % 4) == 3 ? '\n' : ' ');
        }
    }

    qemu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");

    for (i = 0; i < 16; ++i) {
        qemu_fprintf(f, " A%02d=%08x%c",
                     i, env->regs[i], (i % 4) == 3 ? '\n' : ' ');
    }

    /* Make phys_regs coherent with the current window before dumping it */
    xtensa_sync_phys_from_window(env);
    qemu_fprintf(f, "\n");

    for (i = 0; i < env->config->nareg; ++i) {
        qemu_fprintf(f, "AR%02d=%08x ", i, env->phys_regs[i]);
        if (i % 4 == 3) {
            /* '<' marks a live window start, '=' the current window base */
            bool ws = (env->sregs[WINDOW_START] & (1 << (i / 4))) != 0;
            bool cw = env->sregs[WINDOW_BASE] == i / 4;

            qemu_fprintf(f, "%c%c\n", ws ? '<' : ' ', cw ? '=' : ' ');
        }
    }

    if ((flags & CPU_DUMP_FPU) &&
        xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
        qemu_fprintf(f, "\n");

        for (i = 0; i < 16; ++i) {
            qemu_fprintf(f, "F%02d=%08x (%-+15.8e)%c", i,
                         float32_val(env->fregs[i].f32[FP_F32_LOW]),
                         *(float *)(env->fregs[i].f32 + FP_F32_LOW),
                         (i % 2) == 1 ? '\n' : ' ');
        }
    }

    if ((flags & CPU_DUMP_FPU) &&
        xtensa_option_enabled(env->config, XTENSA_OPTION_DFP_COPROCESSOR) &&
        !xtensa_option_enabled(env->config, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
        qemu_fprintf(f, "\n");

        for (i = 0; i < 16; ++i) {
            qemu_fprintf(f, "F%02d=%016"PRIx64" (%-+24.16le)%c", i,
                         float64_val(env->fregs[i].f64),
                         *(double *)(&env->fregs[i].f64),
                         (i % 2) == 1 ? '\n' : ' ');
        }
    }
}
1431
/*
 * Restore CPU state from TCG insn_start data after an exception inside a
 * TB; data[0] is the PC recorded by xtensa_tr_insn_start().
 */
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
1437
/* ABS: arg[0] = |arg[1]| */
static void translate_abs(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_abs_i32(arg[0].out, arg[1].in);
}

/* ADD: arg[0] = arg[1] + arg[2] */
static void translate_add(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_add_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* ADDI/ADDMI: arg[0] = arg[1] + immediate */
static void translate_addi(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_addi_i32(arg[0].out, arg[1].in, arg[2].imm);
}

/* ADDX2/ADDX4/ADDX8: arg[0] = (arg[1] << par[0]) + arg[2] */
static void translate_addx(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
    tcg_gen_add_i32(arg[0].out, tmp, arg[2].in);
    tcg_temp_free(tmp);
}
1464
/*
 * ALL4/ALL8/ANY4/ANY8: reduce par[1] boolean-register bits starting at
 * bit arg[1].imm into the single boolean bit arg[0].imm.  par[0] selects
 * the ALL (AND) reduction; otherwise ANY (OR) is computed.  The reduction
 * is done arithmetically: adding 1 << imm carries into bit imm + shift
 * only when all masked bits are set, while adding the mask itself sets
 * that bit when any masked bit is set.
 */
static void translate_all(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    uint32_t shift = par[1];
    TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_and_i32(tmp, arg[1].in, mask);
    if (par[0]) {
        tcg_gen_addi_i32(tmp, tmp, 1 << arg[1].imm);
    } else {
        tcg_gen_add_i32(tmp, tmp, mask);
    }
    tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift);
    tcg_gen_deposit_i32(arg[0].out, arg[0].out,
                        tmp, arg[0].imm, 1);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
}
1484
/* AND: arg[0] = arg[1] & arg[2] */
static void translate_and(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_and_i32(arg[0].out, arg[1].in, arg[2].in);
}

/*
 * BALL/BNALL: branch if all (or not all) bits of arg[1] are set in
 * arg[0]; par[0] is the TCG condition comparing the masked value with
 * the mask itself.
 */
static void translate_ball(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
    gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm);
    tcg_temp_free(tmp);
}

/* BANY/BNONE: branch on (arg[0] & arg[1]) compared with zero. */
static void translate_bany(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
}

/* Generic register-register conditional branch (BEQ/BNE/BLT/...). */
static void translate_b(DisasContext *dc, const OpcodeArg arg[],
                        const uint32_t par[])
{
    gen_brcond(dc, par[0], arg[0].in, arg[1].in, arg[2].imm);
}

/*
 * BBC/BBS: branch on the bit of arg[0] selected by arg[1] (mod 32).
 * Bit numbering follows target endianness: bit 0 is the MSB on
 * big-endian targets, the LSB otherwise.
 */
static void translate_bb(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
#ifdef TARGET_WORDS_BIGENDIAN
    TCGv_i32 bit = tcg_const_i32(0x80000000u);
#else
    TCGv_i32 bit = tcg_const_i32(0x00000001u);
#endif
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, arg[1].in, 0x1f);
#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_shr_i32(bit, bit, tmp);
#else
    tcg_gen_shl_i32(bit, bit, tmp);
#endif
    tcg_gen_and_i32(tmp, arg[0].in, bit);
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
    tcg_temp_free(bit);
}

/* BBCI/BBSI: like BBC/BBS but with an immediate bit number. */
static void translate_bbi(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
#else
    tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
#endif
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
}

/* Register-immediate conditional branch (BEQI/BNEI/BLTI/...). */
static void translate_bi(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    gen_brcondi(dc, par[0], arg[0].in, arg[1].imm, arg[2].imm);
}

/* Compare-with-zero branch (BEQZ/BNEZ/BLTZ/BGEZ). */
static void translate_bz(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    gen_brcondi(dc, par[0], arg[0].in, 0, arg[1].imm);
}
1560
/* Operation selector (par[0]) for translate_boolean(). */
enum {
    BOOLEAN_AND,
    BOOLEAN_ANDC,
    BOOLEAN_OR,
    BOOLEAN_ORC,
    BOOLEAN_XOR,
};

/*
 * Boolean-register ops (ANDB/ANDBC/ORB/ORBC/XORB): extract the single
 * bits arg[1].imm and arg[2].imm from the boolean registers, combine
 * them with the op selected by par[0], and deposit the result into bit
 * arg[0].imm of the destination.
 */
static void translate_boolean(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    static void (* const op[])(TCGv_i32, TCGv_i32, TCGv_i32) = {
        [BOOLEAN_AND] = tcg_gen_and_i32,
        [BOOLEAN_ANDC] = tcg_gen_andc_i32,
        [BOOLEAN_OR] = tcg_gen_or_i32,
        [BOOLEAN_ORC] = tcg_gen_orc_i32,
        [BOOLEAN_XOR] = tcg_gen_xor_i32,
    };

    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp1, arg[1].in, arg[1].imm);
    tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm);
    op[par[0]](tmp1, tmp1, tmp2);
    tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2);
}
1590
/* BF/BT: branch on a single boolean-register bit (arg[0].imm). */
static void translate_bp(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm);
    gen_brcondi(dc, par[0], tmp, 0, arg[1].imm);
    tcg_temp_free(tmp);
}

/* CALL0: save return address in a0 and jump to the immediate target. */
static void translate_call0(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
    gen_jumpi(dc, arg[0].imm, 0);
}

/* CALL4/CALL8/CALL12: windowed call to an immediate target;
   par[0] is the call increment. */
static void translate_callw(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
    gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0));
    tcg_temp_free(tmp);
}

/* CALLX0: save return address in a0 and jump to the register target.
   The target is copied first in case arg[0] is a0 itself. */
static void translate_callx0(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, arg[0].in);
    tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
    gen_jump(dc, tmp);
    tcg_temp_free(tmp);
}

/* CALLX4/CALLX8/CALLX12: windowed call to a register target. */
static void translate_callxw(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, arg[0].in);
    gen_callw_slot(dc, par[0], tmp, -1);
    tcg_temp_free(tmp);
}
1635
/*
 * CLAMPS: clamp arg[1] to the signed range representable in
 * arg[2].imm + 1 bits, i.e. [-2^imm, 2^imm - 1], via smax then smin.
 */
static void translate_clamps(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm);
    TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1);

    tcg_gen_smax_i32(tmp1, tmp1, arg[1].in);
    tcg_gen_smin_i32(arg[0].out, tmp1, tmp2);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2);
}

/* CLRB_EXPSTATE: clear bit arg[0].imm of the EXPSTATE user register. */
static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[],
                                    const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_andi_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], ~(1u << arg[0].imm));
}

/* CLREX: invalidate the exclusive-access monitor. */
static void translate_clrex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}

/* CONST16: shift the previous 16-bit chunk up and insert the new one,
   building a 32-bit constant from two CONST16 instructions. */
static void translate_const16(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    TCGv_i32 c = tcg_const_i32(arg[1].imm);

    tcg_gen_deposit_i32(arg[0].out, c, arg[0].in, 16, 16);
    tcg_temp_free(c);
}

/* Data-cache ops implemented as a probing byte load so that the usual
   MMU exception behavior is preserved; the loaded value is discarded. */
static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 res = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
    tcg_gen_qemu_ld8u(res, addr, dc->cring);
    tcg_temp_free(addr);
    tcg_temp_free(res);
}

/* DEPBITS: deposit arg[3].imm bits of arg[0] into arg[1] at arg[2].imm. */
static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_deposit_i32(arg[1].out, arg[1].in, arg[0].in,
                        arg[2].imm, arg[3].imm);
}

/* DIWBUI.P: only the address-register post-increment is modelled. */
static void translate_diwbuip(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_addi_i32(arg[0].out, arg[0].in, dc->config->dcache_line_bytes);
}
1694
/*
 * Translation-time legality check for ENTRY: the s field must be <= 3
 * and the function must have been entered with call-window overflow
 * enabled (CWOE); otherwise the insn is treated as illegal.
 */
static uint32_t test_exceptions_entry(DisasContext *dc, const OpcodeArg arg[],
                                      const uint32_t par[])
{
    if (arg[0].imm > 3 || !dc->cwoe) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Illegal entry instruction(pc = %08x)\n", dc->pc);
        return XTENSA_OP_ILL;
    } else {
        return 0;
    }
}

/* Window-overflow test mask for ENTRY, derived from the caller's
   call increment (CALLINC field captured in the TB flags). */
static uint32_t test_overflow_entry(DisasContext *dc, const OpcodeArg arg[],
                                    const uint32_t par[])
{
    return 1 << (dc->callinc * 4);
}

/* ENTRY: delegate window rotation and stack adjustment to the helper. */
static void translate_entry(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 pc = tcg_const_i32(dc->pc);
    TCGv_i32 s = tcg_const_i32(arg[0].imm);
    TCGv_i32 imm = tcg_const_i32(arg[1].imm);
    gen_helper_entry(cpu_env, pc, s, imm);
    tcg_temp_free(imm);
    tcg_temp_free(s);
    tcg_temp_free(pc);
}
1724
/* EXTUI: extract arg[3].imm bits of arg[1] starting at bit arg[2].imm. */
static void translate_extui(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    int maskimm = (1 << arg[3].imm) - 1;

    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm);
    tcg_gen_andi_i32(arg[0].out, tmp, maskimm);
    tcg_temp_free(tmp);
}

/* GETEX: atomically (at translation level) exchange the low bit of
   arg[0] with ATOMCTL bit 8 (the exclusive-state flag). */
static void translate_getex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1);
    tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1);
    tcg_gen_mov_i32(arg[0].out, tmp);
    tcg_temp_free(tmp);
}
1746
/* Instruction-cache ops: modelled as an ITLB hit test at the effective
   address (system emulation only; a no-op for user-mode). */
static void translate_icache(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_movi_i32(cpu_pc, dc->pc);
    tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
    gen_helper_itlb_hit_test(cpu_env, addr);
    tcg_temp_free(addr);
#endif
}

/* IITLB/IDTLB: invalidate a TLB entry; par[0] selects the DTLB. */
static void translate_itlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    gen_helper_itlb(cpu_env, arg[0].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* J: unconditional jump to an immediate target. */
static void translate_j(DisasContext *dc, const OpcodeArg arg[],
                        const uint32_t par[])
{
    gen_jumpi(dc, arg[0].imm, 0);
}

/* JX: unconditional jump to a register target. */
static void translate_jx(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    gen_jump(dc, arg[0].in);
}
1782
/* L32E: 32-bit load for window exception handlers, performed with the
   PS.RING privilege (dc->ring) rather than the effective ring. */
static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
    tcg_temp_free(addr);
}

#ifdef CONFIG_USER_ONLY
/* Exclusive-access checks are only meaningful with the system MMU/MPU. */
static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
{
}
#else
/*
 * Without an MPU, exclusive accesses must be validated by a helper;
 * with an MPU the hardware-defined behavior makes the check unnecessary.
 */
static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
{
    if (!option_enabled(dc, XTENSA_OPTION_MPU)) {
        TCGv_i32 tpc = tcg_const_i32(dc->pc);
        TCGv_i32 write = tcg_const_i32(is_write);

        gen_helper_check_exclusive(cpu_env, tpc, addr, write);
        tcg_temp_free(tpc);
        tcg_temp_free(write);
    }
}
#endif

/* L32EX: exclusive load; records the address/value pair in the
   exclusive monitor for a later S32EX. */
static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_mov_i32(addr, arg[1].in);
    gen_load_store_alignment(dc, 2, addr, true);
    gen_check_exclusive(dc, addr, false);
    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL);
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
    tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
    tcg_temp_free(addr);
}
1825
/*
 * Generic load/store translator.
 * par[0]: MemOp (size/sign/endianness), par[1]: acquire/release semantics
 * (emit a memory barrier), par[2]: nonzero for store, zero for load.
 */
static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    if (par[0] & MO_SIZE) {
        gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
    }
    if (par[2]) {
        if (par[1]) {
            /* Release: barrier before the store */
            tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
        }
        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0]);
    } else {
        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0]);
        if (par[1]) {
            /* Acquire: barrier after the load */
            tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
        }
    }
    tcg_temp_free(addr);
}

/*
 * L32R: PC-relative literal load.  With the LITBASE option active the
 * address is LITBASE + raw offset; otherwise the absolute address was
 * already computed into arg[1].imm at decode time.
 */
static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp;

    if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) {
        tmp = tcg_const_i32(arg[1].raw_imm - 1);
        tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp);
    } else {
        tmp = tcg_const_i32(arg[1].imm);
    }
    tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
    tcg_temp_free(tmp);
}
1863
/*
 * LOOP/LOOPGTZ/LOOPNEZ: set up LBEG/LEND/LCOUNT for a zero-overhead loop.
 * par[0] is the condition under which the loop body executes at all
 * (TCG_COND_NEVER for plain LOOP, which always enters the body); when the
 * condition fails, control jumps straight to the loop end.
 */
static void translate_loop(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    uint32_t lend = arg[1].imm;

    tcg_gen_subi_i32(cpu_SR[LCOUNT], arg[0].in, 1);
    tcg_gen_movi_i32(cpu_SR[LBEG], dc->base.pc_next);
    tcg_gen_movi_i32(cpu_SR[LEND], lend);

    if (par[0] != TCG_COND_NEVER) {
        TCGLabel *label = gen_new_label();
        tcg_gen_brcondi_i32(par[0], arg[0].in, 0, label);
        gen_jumpi(dc, lend, 1);
        gen_set_label(label);
    }

    gen_jumpi(dc, dc->base.pc_next, 0);
}
1882
/* MAC16 operation selector (par[0]). */
enum {
    MAC16_UMUL,
    MAC16_MUL,
    MAC16_MULA,
    MAC16_MULS,
    MAC16_NONE,
};

/* MAC16 half-word selectors (par[1]); HX/XH pick the high half of the
   first/second operand respectively. */
enum {
    MAC16_LL,
    MAC16_HL,
    MAC16_LH,
    MAC16_HH,

    MAC16_HX = 0x1,
    MAC16_XH = 0x2,
};

/*
 * MAC16 family: optional 16x16 multiply (signed or unsigned) with
 * accumulate/subtract into the 40-bit ACC (ACCHI:ACCLO, ACCHI kept
 * sign-extended to 8 bits), optionally combined with an MR register
 * load and address-register update (ld_offset != 0).
 */
static void translate_mac16(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    int op = par[0];
    unsigned half = par[1];
    uint32_t ld_offset = par[2];
    /* With a load variant the multiply operands start at arg[2] */
    unsigned off = ld_offset ? 2 : 0;
    TCGv_i32 vaddr = tcg_temp_new_i32();
    TCGv_i32 mem32 = tcg_temp_new_i32();

    if (ld_offset) {
        tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
        gen_load_store_alignment(dc, 2, vaddr, false);
        tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
    }
    if (op != MAC16_NONE) {
        TCGv_i32 m1 = gen_mac16_m(arg[off].in,
                                  half & MAC16_HX, op == MAC16_UMUL);
        TCGv_i32 m2 = gen_mac16_m(arg[off + 1].in,
                                  half & MAC16_XH, op == MAC16_UMUL);

        if (op == MAC16_MUL || op == MAC16_UMUL) {
            tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
            if (op == MAC16_UMUL) {
                tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
            } else {
                tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
            }
        } else {
            TCGv_i32 lo = tcg_temp_new_i32();
            TCGv_i32 hi = tcg_temp_new_i32();

            tcg_gen_mul_i32(lo, m1, m2);
            tcg_gen_sari_i32(hi, lo, 31);
            if (op == MAC16_MULA) {
                tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 lo, hi);
            } else {
                tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 lo, hi);
            }
            /* ACC is 40 bits wide: keep ACCHI sign-extended from bit 7 */
            tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);

            tcg_temp_free_i32(lo);
            tcg_temp_free_i32(hi);
        }
        tcg_temp_free(m1);
        tcg_temp_free(m2);
    }
    if (ld_offset) {
        /* Write back the incremented address and the loaded MR value */
        tcg_gen_mov_i32(arg[1].out, vaddr);
        tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32);
    }
    tcg_temp_free(vaddr);
    tcg_temp_free(mem32);
}
1959
/* MEMW/EXTW: full memory barrier. */
static void translate_memw(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
}

/* MIN: signed minimum. */
static void translate_smin(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_smin_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MINU: unsigned minimum. */
static void translate_umin(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_umin_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MAX: signed maximum. */
static void translate_smax(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_smax_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MAXU: unsigned maximum. */
static void translate_umax(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_umax_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MOV (and MOV.N): register copy. */
static void translate_mov(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}
1995
/* MOVEQZ/MOVNEZ/MOVLTZ/MOVGEZ: arg[0] = (arg[2] <cond> 0) ? arg[1] : arg[0]. */
static void translate_movcond(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(par[0], arg[0].out,
                        arg[2].in, zero, arg[1].in, arg[0].in);
    tcg_temp_free(zero);
}

/* MOVI/MOVI.N: load an immediate. */
static void translate_movi(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_movi_i32(arg[0].out, arg[1].imm);
}

/* MOVF/MOVT: conditional move on a boolean-register bit (arg[2].imm). */
static void translate_movp(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
    tcg_gen_movcond_i32(par[0],
                        arg[0].out, tmp, zero,
                        arg[1].in, arg[0].in);
    tcg_temp_free(tmp);
    tcg_temp_free(zero);
}

/* MOVSP register move; the alloca/underflow checks run via the
   XTENSA_OP_ALLOCA pre-translate handling, not here. */
static void translate_movsp(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}

/* MUL16S/MUL16U: 16x16 -> 32 multiply; par[0] selects signed operands. */
static void translate_mul16(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 v1 = tcg_temp_new_i32();
    TCGv_i32 v2 = tcg_temp_new_i32();

    if (par[0]) {
        tcg_gen_ext16s_i32(v1, arg[1].in);
        tcg_gen_ext16s_i32(v2, arg[2].in);
    } else {
        tcg_gen_ext16u_i32(v1, arg[1].in);
        tcg_gen_ext16u_i32(v2, arg[2].in);
    }
    tcg_gen_mul_i32(arg[0].out, v1, v2);
    tcg_temp_free(v2);
    tcg_temp_free(v1);
}
2049
/* MULL: low 32 bits of the product. */
static void translate_mull(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_mul_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MULSH/MULUH: high 32 bits of the signed/unsigned 64-bit product
   (par[0] selects signed). */
static void translate_mulh(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 lo = tcg_temp_new();

    if (par[0]) {
        tcg_gen_muls2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
    } else {
        tcg_gen_mulu2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
    }
    tcg_temp_free(lo);
}

/* NEG: arg[0] = -arg[1]. */
static void translate_neg(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_neg_i32(arg[0].out, arg[1].in);
}

/* NOP: no operation. */
static void translate_nop(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
}

/* NSA: normalization shift amount (count of redundant sign bits). */
static void translate_nsa(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_clrsb_i32(arg[0].out, arg[1].in);
}

/* NSAU: count leading zeros; 32 for a zero input. */
static void translate_nsau(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_clzi_i32(arg[0].out, arg[1].in, 32);
}

/* OR: arg[0] = arg[1] | arg[2]. */
static void translate_or(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    tcg_gen_or_i32(arg[0].out, arg[1].in, arg[2].in);
}
2097
/* PITLB/PDTLB: probe a TLB for a virtual address; par[0] selects the
   DTLB.  cpu_pc is synced since the helper may raise an exception. */
static void translate_ptlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* PPTLB: probe the MPU for a virtual address. */
static void translate_pptlb(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_helper_pptlb(arg[0].out, cpu_env, arg[1].in);
#endif
}
2118
/*
 * QUOS/REMS: signed division/remainder (par[0] selects the quotient).
 * The INT_MIN / -1 case, which would trap on the host, is special-cased
 * to the architected result (0x80000000 quotient, 0 remainder).
 * Division by zero is handled by the common XTENSA_OP_DIVIDE_BY_ZERO
 * check before translation, not here.
 */
static void translate_quos(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGLabel *label1 = gen_new_label();
    TCGLabel *label2 = gen_new_label();

    tcg_gen_brcondi_i32(TCG_COND_NE, arg[1].in, 0x80000000,
                        label1);
    tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0xffffffff,
                        label1);
    tcg_gen_movi_i32(arg[0].out,
                     par[0] ? 0x80000000 : 0);
    tcg_gen_br(label2);
    gen_set_label(label1);
    if (par[0]) {
        tcg_gen_div_i32(arg[0].out,
                        arg[1].in, arg[2].in);
    } else {
        tcg_gen_rem_i32(arg[0].out,
                        arg[1].in, arg[2].in);
    }
    gen_set_label(label2);
}

/* QUOU: unsigned division. */
static void translate_quou(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_divu_i32(arg[0].out,
                     arg[1].in, arg[2].in);
}

/* READ_IMPWIRE: import-wire state; modelled as constant zero. */
static void translate_read_impwire(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_movi_i32(arg[0].out, 0);
}

/* REMU: unsigned remainder. */
static void translate_remu(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_remu_i32(arg[0].out,
                     arg[1].in, arg[2].in);
}
2163
/* RER: read external register via helper. */
static void translate_rer(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_helper_rer(arg[0].out, cpu_env, arg[1].in);
}

/* RET/RET.N: jump to the return address in a0. */
static void translate_ret(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_jump(dc, cpu_R[0]);
}

/*
 * Translation-time legality check for RETW: illegal unless the function
 * was entered with call-window overflow enabled; otherwise emit the
 * runtime check for an ill-formed windowed return.
 */
static uint32_t test_exceptions_retw(DisasContext *dc, const OpcodeArg arg[],
                                     const uint32_t par[])
{
    if (!dc->cwoe) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Illegal retw instruction(pc = %08x)\n", dc->pc);
        return XTENSA_OP_ILL;
    } else {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_test_ill_retw(cpu_env, tmp);
        tcg_temp_free(tmp);
        return 0;
    }
}

/*
 * RETW/RETW.N: windowed return.  Clears the WINDOW_START bit of the
 * current frame, then builds the return address from the low 30 bits of
 * a0 combined with the top 2 bits of the current PC (windowed calls only
 * store a 30-bit offset), rotates the window back via the helper, and
 * jumps.
 */
static void translate_retw(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
    tcg_gen_andc_i32(cpu_SR[WINDOW_START],
                     cpu_SR[WINDOW_START], tmp);
    tcg_gen_movi_i32(tmp, dc->pc);
    tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30);
    gen_helper_retw(cpu_env, cpu_R[0]);
    gen_jump(dc, tmp);
    tcg_temp_free(tmp);
}
2205
/* RFDE: return from double exception; target is DEPC when the core has
   one, EPC1 otherwise. */
static void translate_rfde(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    gen_jump(dc, cpu_SR[dc->config->ndepc ? DEPC : EPC1]);
}

/* RFE: clear PS.EXCM and return to EPC1. */
static void translate_rfe(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
    gen_jump(dc, cpu_SR[EPC1]);
}

/* RFI n: restore PS from EPSn and return to EPCn (arg[0].imm is n). */
static void translate_rfi(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + arg[0].imm - 2]);
    gen_jump(dc, cpu_SR[EPC1 + arg[0].imm - 1]);
}

/*
 * RFWO/RFWU: return from window overflow/underflow.  Clears PS.EXCM,
 * then clears (par[0] set: overflow) or sets (underflow) the
 * WINDOW_START bit of the current frame, restores the saved window base
 * and returns to EPC1.
 */
static void translate_rfw(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(1);

    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
    tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);

    if (par[0]) {
        tcg_gen_andc_i32(cpu_SR[WINDOW_START],
                         cpu_SR[WINDOW_START], tmp);
    } else {
        tcg_gen_or_i32(cpu_SR[WINDOW_START],
                       cpu_SR[WINDOW_START], tmp);
    }

    tcg_temp_free(tmp);
    gen_helper_restore_owb(cpu_env);
    gen_jump(dc, cpu_SR[EPC1]);
}
2246
/*
 * ROTW: rotate the register window by arg[0].imm frames.  The new base is
 * staged in cpu_windowbase_next; the actual switch is committed elsewhere.
 */
static void translate_rotw(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_addi_i32(cpu_windowbase_next, cpu_SR[WINDOW_BASE], arg[0].imm);
}
2252
/*
 * RSIL: return the old PS in arg[0] and set PS.INTLEVEL to arg[1].imm.
 * The caller-visible read happens before the INTLEVEL field is replaced.
 */
static void translate_rsil(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, cpu_SR[PS]);
    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
    tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], arg[1].imm);
}
2260
/*
 * RSR: read special register par[0].  SRs with no entry in sr_name are
 * not backed by state on this core and read as zero.
 */
static void translate_rsr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (sr_name[par[0]]) {
        tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
    } else {
        tcg_gen_movi_i32(arg[0].out, 0);
    }
}
2270
/*
 * RSR.CCOUNT (system mode only): bring CCOUNT up to date via the helper,
 * then read it.  gen_io_start() is required under icount because the
 * helper reads the virtual clock.
 */
static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_update_ccount(cpu_env);
    tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
#endif
}
2282
/*
 * RSR.PTEVADDR (system mode only): compose the page-table entry address
 * from EXCVADDR[31:10] OR'ed into the PTEVADDR base, with the low two
 * bits cleared for word alignment.
 */
static void translate_rsr_ptevaddr(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(tmp, tmp, cpu_SR[PTEVADDR]);
    tcg_gen_andi_i32(arg[0].out, tmp, 0xfffffffc);
    tcg_temp_free(tmp);
#endif
}
2295
/*
 * RITLB0/RITLB1/RDTLB0/RDTLB1 (system mode only): read a TLB entry field.
 * par[0] selects instruction vs data TLB (passed to the helper as a flag);
 * par[1] selects which of the two read helpers to use.
 */
static void translate_rtlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    static void (* const helper[])(TCGv_i32 r, TCGv_env env, TCGv_i32 a1,
                                   TCGv_i32 a2) = {
        gen_helper_rtlb0,
        gen_helper_rtlb1,
    };
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}
2311
/* RPTLB0 (system mode only): read MPU/protection TLB entry, via helper. */
static void translate_rptlb0(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_rptlb0(arg[0].out, cpu_env, arg[1].in);
#endif
}
2319
/* RPTLB1 (system mode only): companion read of the protection TLB entry. */
static void translate_rptlb1(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_rptlb1(arg[0].out, cpu_env, arg[1].in);
#endif
}
2327
/* RUR: read user register par[0]. */
static void translate_rur(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, cpu_UR[par[0]]);
}
2333
/* SETB.EXPSTATE: set bit arg[0].imm of the EXPSTATE user register. */
static void translate_setb_expstate(DisasContext *dc, const OpcodeArg arg[],
                                    const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_ori_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], 1u << arg[0].imm);
}
2340
/*
 * Emit a runtime ATOMCTL permission check for an atomic access to addr,
 * reporting the current PC on failure.  No-op for user-mode emulation.
 */
#ifdef CONFIG_USER_ONLY
static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
{
}
#else
static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);

    gen_helper_check_atomctl(cpu_env, tpc, addr);
    tcg_temp_free(tpc);
}
#endif
2354
/*
 * S32C1I: store-compare-conditional; atomically compares [addr] with
 * SCOMPARE1 and stores the old arg[0] value on match, returning the
 * previous memory contents in arg[0].
 * NOTE(review): local temps appear to be required because the cmpxchg
 * expansion / helpers may branch internally — confirm before changing.
 */
static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_local_new_i32();
    TCGv_i32 addr = tcg_temp_local_new_i32();

    /* Snapshot the source register: arg[0].out may alias arg[0].in. */
    tcg_gen_mov_i32(tmp, arg[0].in);
    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, true);
    gen_check_atomctl(dc, addr);
    tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
                               tmp, dc->cring, MO_TEUL);
    tcg_temp_free(addr);
    tcg_temp_free(tmp);
}
2370
/*
 * S32E: store word using dc->ring (the effective ring for window
 * exception handling) rather than the current execution ring.
 */
static void translate_s32e(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL);
    tcg_temp_free(addr);
}
2381
/*
 * S32EX: store-exclusive.  Succeeds only if addr still matches the
 * address recorded by the paired load-exclusive (cpu_exclusive_addr) and
 * memory still holds cpu_exclusive_val.  The success flag is written to
 * ATOMCTL bit 8, and the previous value of that bit is returned in arg[0].
 * The statement order here is load-bearing; do not reorder.
 */
static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 prev = tcg_temp_new_i32();
    TCGv_i32 addr = tcg_temp_local_new_i32();
    TCGv_i32 res = tcg_temp_local_new_i32();
    TCGLabel *label = gen_new_label();

    /* Default: failure (res == 0) if the address check branches past. */
    tcg_gen_movi_i32(res, 0);
    tcg_gen_mov_i32(addr, arg[1].in);
    gen_load_store_alignment(dc, 2, addr, true);
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
    gen_check_exclusive(dc, addr, true);
    /* Store succeeds only if memory still equals cpu_exclusive_val. */
    tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
                               arg[0].in, dc->cring, MO_TEUL);
    tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
    tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val,
                        prev, cpu_exclusive_val, prev, cpu_exclusive_val);
    /* Invalidate the exclusive monitor either way. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
    gen_set_label(label);
    /* Old ATOMCTL bit 8 out, then record the new result in its place. */
    tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1);
    tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1);
    tcg_temp_free(prev);
    tcg_temp_free(addr);
    tcg_temp_free(res);
}
2408
/* SALT/SALTU: set arg[0] to 1 if arg[1] <cond> arg[2], else 0; the TCG
 * comparison condition (signed/unsigned LT) comes in via par[0]. */
static void translate_salt(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_setcond_i32(par[0],
                        arg[0].out,
                        arg[1].in, arg[2].in);
}
2416
2417 static void translate_sext(DisasContext *dc, const OpcodeArg arg[],
2418 const uint32_t par[])
2419 {
2420 int shift = 31 - arg[2].imm;
2421
2422 if (shift == 24) {
2423 tcg_gen_ext8s_i32(arg[0].out, arg[1].in);
2424 } else if (shift == 16) {
2425 tcg_gen_ext16s_i32(arg[0].out, arg[1].in);
2426 } else {
2427 TCGv_i32 tmp = tcg_temp_new_i32();
2428 tcg_gen_shli_i32(tmp, arg[1].in, shift);
2429 tcg_gen_sari_i32(arg[0].out, tmp, shift);
2430 tcg_temp_free(tmp);
2431 }
2432 }
2433
/*
 * Decide whether SIMCALL raises an illegal-instruction exception.
 * Returns XTENSA_OP_ILL when the opcode is illegal, 0 otherwise; also
 * logs a guest error whenever semihosting is not actually enabled.
 */
static uint32_t test_exceptions_simcall(DisasContext *dc,
                                        const OpcodeArg arg[],
                                        const uint32_t par[])
{
#ifdef CONFIG_USER_ONLY
    /* SIMCALL is never valid for user-mode emulation. */
    bool ill = true;
#else
    /* Between RE.2 and RE.3 simcall opcodes become nop for the hardware. */
    bool ill = dc->config->hw_version <= 250002 && !semihosting_enabled();
#endif
    if (ill || !semihosting_enabled()) {
        qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
    }
    return ill ? XTENSA_OP_ILL : 0;
}
2449
/*
 * SIMCALL: invoke the semihosting helper when enabled; otherwise a nop
 * (legality was already checked by test_exceptions_simcall).
 */
static void translate_simcall(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (semihosting_enabled()) {
        gen_helper_simcall(cpu_env);
    }
#endif
}
2459
/*
 * Note: 64 bit ops are used here solely because SAR values
 * have range 0..63
 *
 * Contract: the expansion expects a TCGv_i64 named `v` (holding the
 * value to shift) and an OpcodeArg array `arg` in the enclosing scope;
 * it shifts v by `reg`, writes the low 32 bits to arg[0].out, and frees
 * both v and its internal temp.
 */
#define gen_shift_reg(cmd, reg) do { \
                    TCGv_i64 tmp = tcg_temp_new_i64(); \
                    tcg_gen_extu_i32_i64(tmp, reg); \
                    tcg_gen_##cmd##_i64(v, v, tmp); \
                    tcg_gen_extrl_i64_i32(arg[0].out, v); \
                    tcg_temp_free_i64(v); \
                    tcg_temp_free_i64(tmp); \
                } while (0)

/* Common case: shift amount taken from the SAR special register. */
#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
2474
/*
 * SLL: shift left by (32 - SAR).  When the translator knows SAR was set
 * via SSL (sar_m32_5bit), the precomputed 32 - SAR lives in dc->sar_m32
 * and a plain 32-bit shift suffices; otherwise compute (32 - SAR) & 0x3f
 * at runtime and use the 64-bit shift macro to cover amounts >= 32.
 */
static void translate_sll(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (dc->sar_m32_5bit) {
        tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        TCGv_i32 s = tcg_const_i32(32);
        tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
        tcg_gen_andi_i32(s, s, 0x3f);
        tcg_gen_extu_i32_i64(v, arg[1].in);
        gen_shift_reg(shl, s);
        tcg_temp_free(s);
    }
}
2490
2491 static void translate_slli(DisasContext *dc, const OpcodeArg arg[],
2492 const uint32_t par[])
2493 {
2494 if (arg[2].imm == 32) {
2495 qemu_log_mask(LOG_GUEST_ERROR, "slli a%d, a%d, 32 is undefined\n",
2496 arg[0].imm, arg[1].imm);
2497 }
2498 tcg_gen_shli_i32(arg[0].out, arg[1].in, arg[2].imm & 0x1f);
2499 }
2500
/*
 * SRA: arithmetic shift right by SAR.  Fast path when SAR is known to be
 * in 0..31 (sar_m32_5bit); otherwise sign-extend to 64 bits and use the
 * 64-bit shift macro so amounts up to 63 behave correctly.
 */
static void translate_sra(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (dc->sar_m32_5bit) {
        tcg_gen_sar_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(v, arg[1].in);
        gen_shift(sar);
    }
}
2512
/* SRAI: arithmetic shift right by the immediate arg[2].imm. */
static void translate_srai(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_sari_i32(arg[0].out, arg[1].in, arg[2].imm);
}
2518
/*
 * SRC: funnel shift — concatenate arg[1]:arg[2] (high:low) into a 64-bit
 * value and shift right by SAR, keeping the low 32 bits.
 */
static void translate_src(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    TCGv_i64 v = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(v, arg[2].in, arg[1].in);
    gen_shift(shr);
}
2526
/*
 * SRL: logical shift right by SAR.  Fast path when SAR is known to be in
 * 0..31; otherwise zero-extend to 64 bits and use the 64-bit shift macro.
 */
static void translate_srl(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (dc->sar_m32_5bit) {
        tcg_gen_shr_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(v, arg[1].in);
        gen_shift(shr);
    }
}
2538
2539 #undef gen_shift
2540 #undef gen_shift_reg
2541
/* SRLI: logical shift right by the immediate arg[2].imm. */
static void translate_srli(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_shri_i32(arg[0].out, arg[1].in, arg[2].imm);
}
2547
/* SSA8B: set SAR for a left shift from a byte offset (arg[0] * 8). */
static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[0].in, 3);
    gen_left_shift_sar(dc, tmp);
    tcg_temp_free(tmp);
}
2556
/* SSA8L: set SAR for a right shift from a byte offset (arg[0] * 8). */
static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[0].in, 3);
    gen_right_shift_sar(dc, tmp);
    tcg_temp_free(tmp);
}
2565
/* SSAI: set SAR for a right shift from the immediate arg[0].imm. */
static void translate_ssai(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
    gen_right_shift_sar(dc, tmp);
    tcg_temp_free(tmp);
}
2573
/* SSL: set SAR for a left shift from register arg[0]. */
static void translate_ssl(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_left_shift_sar(dc, arg[0].in);
}
2579
/* SSR: set SAR for a right shift from register arg[0]. */
static void translate_ssr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_right_shift_sar(dc, arg[0].in);
}
2585
/* SUB: arg[0] = arg[1] - arg[2]. */
static void translate_sub(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_sub_i32(arg[0].out, arg[1].in, arg[2].in);
}
2591
2592 static void translate_subx(DisasContext *dc, const OpcodeArg arg[],
2593 const uint32_t par[])
2594 {
2595 TCGv_i32 tmp = tcg_temp_new_i32();
2596 tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
2597 tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in);
2598 tcg_temp_free(tmp);
2599 }
2600
/* WAITI: wait for interrupt at level arg[0].imm (system mode only). */
static void translate_waiti(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_waiti(dc, arg[0].imm);
#endif
}
2608
/*
 * WITLB/WDTLB (system mode only): write a TLB entry; par[0] is the
 * dtlb-vs-itlb flag passed to the helper.
 */
static void translate_wtlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}
2619
/* WPTLB (system mode only): write MPU/protection TLB entry, via helper. */
static void translate_wptlb(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wptlb(cpu_env, arg[0].in, arg[1].in);
#endif
}
2627
/* WER: write external register — all semantics live in the helper. */
static void translate_wer(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_helper_wer(cpu_env, arg[0].in, arg[1].in);
}
2633
/* WRMSK.EXPSTATE: EXPSTATE = arg[0] & arg[1] (masked write). */
static void translate_wrmsk_expstate(DisasContext *dc, const OpcodeArg arg[],
                                     const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_and_i32(cpu_UR[EXPSTATE], arg[0].in, arg[1].in);
}
2640
/*
 * WSR: write special register par[0].  Writes to SRs without a name
 * (no backing state on this core) are silently discarded.
 */
static void translate_wsr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (sr_name[par[0]]) {
        tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
    }
}
2648
/* WSR variant for SRs with reserved bits: only the bits in par[2] stick. */
static void translate_wsr_mask(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
{
    if (sr_name[par[0]]) {
        tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, par[2]);
    }
}
2656
/* WSR.ACCHI: only 8 bits are held; the value is sign-extended from bit 7. */
static void translate_wsr_acchi(DisasContext *dc, const OpcodeArg arg[],
                                const uint32_t par[])
{
    tcg_gen_ext8s_i32(cpu_SR[par[0]], arg[0].in);
}
2662
/*
 * WSR.CCOMPAREn (system mode only): store the new compare value, then let
 * the helper rearm the matching timer.  gen_io_start() is needed under
 * icount because the helper touches the virtual clock.
 */
static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    uint32_t id = par[0] - CCOMPARE;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->nccompare);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    /* The SR must be updated before the helper reads it. */
    tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
    gen_helper_update_ccompare(cpu_env, tmp);
    tcg_temp_free(tmp);
#endif
}
2679
/*
 * WSR.CCOUNT (system mode only): delegate to the helper, which resyncs
 * the cycle counter base; gen_io_start() required under icount.
 */
static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_wsr_ccount(cpu_env, arg[0].in);
#endif
}
2690
/* WSR.DBREAKAn (system mode only): set data-breakpoint address n. */
static void translate_wsr_dbreaka(DisasContext *dc, const OpcodeArg arg[],
                                  const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    unsigned id = par[0] - DBREAKA;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->ndbreak);
    gen_helper_wsr_dbreaka(cpu_env, tmp, arg[0].in);
    tcg_temp_free(tmp);
#endif
}
2703
/* WSR.DBREAKCn (system mode only): set data-breakpoint control n. */
static void translate_wsr_dbreakc(DisasContext *dc, const OpcodeArg arg[],
                                  const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    unsigned id = par[0] - DBREAKC;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->ndbreak);
    gen_helper_wsr_dbreakc(cpu_env, tmp, arg[0].in);
    tcg_temp_free(tmp);
#endif
}
2716
/* WSR.IBREAKAn (system mode only): set instruction-breakpoint address n. */
static void translate_wsr_ibreaka(DisasContext *dc, const OpcodeArg arg[],
                                  const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    unsigned id = par[0] - IBREAKA;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->nibreak);
    gen_helper_wsr_ibreaka(cpu_env, tmp, arg[0].in);
    tcg_temp_free(tmp);
#endif
}
2729
/* WSR.IBREAKENABLE (system mode only): delegated to helper. */
static void translate_wsr_ibreakenable(DisasContext *dc, const OpcodeArg arg[],
                                       const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_ibreakenable(cpu_env, arg[0].in);
#endif
}
2737
/*
 * WSR.ICOUNT (system mode only): when instruction counting is active in
 * this TB, stage the value in dc->next_icount instead of writing the SR
 * directly, so the count committed at the end of the TB is consistent.
 */
static void translate_wsr_icount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (dc->icount) {
        tcg_gen_mov_i32(dc->next_icount, arg[0].in);
    } else {
        tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
    }
#endif
}
2749
/* WSR.INTCLEAR (system mode only): clear pending interrupts via helper. */
static void translate_wsr_intclear(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_intclear(cpu_env, arg[0].in);
#endif
}
2757
/* WSR.INTSET (system mode only): raise software interrupts via helper. */
static void translate_wsr_intset(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_intset(cpu_env, arg[0].in);
#endif
}
2765
/* WSR.MEMCTL (system mode only): delegated to helper. */
static void translate_wsr_memctl(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_memctl(cpu_env, arg[0].in);
#endif
}
2773
/* WSR.MPUENB (system mode only): update MPU entry enables via helper. */
static void translate_wsr_mpuenb(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_mpuenb(cpu_env, arg[0].in);
#endif
}
2781
/*
 * WSR.PS (system mode only): write PS, keeping only the architecturally
 * writable fields; the RING field is writable only when the core has an
 * MMU or MPU.
 */
static void translate_wsr_ps(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU) ||
        option_enabled(dc, XTENSA_OPTION_MPU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, mask);
#endif
}
2796
/* WSR.RASID (system mode only): ASID change handled by the helper. */
static void translate_wsr_rasid(DisasContext *dc, const OpcodeArg arg[],
                                const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_rasid(cpu_env, arg[0].in);
#endif
}
2804
/*
 * WSR.SAR: keep the 6 valid bits, and invalidate the translator's cached
 * knowledge about SAR (the sar_5bit / sar_m32_5bit fast paths).
 */
static void translate_wsr_sar(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
2815
/* WSR.WINDOWBASE (system mode only): stage the new base; the register
 * window switch is committed from cpu_windowbase_next. */
static void translate_wsr_windowbase(DisasContext *dc, const OpcodeArg arg[],
                                     const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_mov_i32(cpu_windowbase_next, arg[0].in);
#endif
}
2823
/* WSR.WINDOWSTART (system mode only): keep one valid bit per 4-register
 * window frame (nareg / 4 frames). */
static void translate_wsr_windowstart(DisasContext *dc, const OpcodeArg arg[],
                                      const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in,
                     (1 << dc->config->nareg / 4) - 1);
#endif
}
2832
/* WUR: write user register par[0]. */
static void translate_wur(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(cpu_UR[par[0]], arg[0].in);
}
2838
/* XOR: arg[0] = arg[1] ^ arg[2]. */
static void translate_xor(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_xor_i32(arg[0].out, arg[1].in, arg[2].in);
}
2844
/*
 * XSR: exchange register with special register par[0].  A temp is needed
 * because arg[0].in and arg[0].out may alias.  Unimplemented SRs read as
 * zero and absorb the write.
 */
static void translate_xsr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (sr_name[par[0]]) {
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_mov_i32(tmp, arg[0].in);
        tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
        tcg_gen_mov_i32(cpu_SR[par[0]], tmp);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_movi_i32(arg[0].out, 0);
    }
}
2859
/*
 * XSR variant for SRs with reserved bits: like translate_xsr, but only
 * the bits in par[2] are stored into the SR.
 */
static void translate_xsr_mask(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
{
    if (sr_name[par[0]]) {
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_mov_i32(tmp, arg[0].in);
        tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
        tcg_gen_andi_i32(cpu_SR[par[0]], tmp, par[2]);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_movi_i32(arg[0].out, 0);
    }
}
2874
/*
 * XSR.CCOUNT (system mode only): update CCOUNT, read the old value, then
 * write the new one via the helper — the read must be latched in a temp
 * before the write.  gen_io_start() required under icount.
 */
static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }

    gen_helper_update_ccount(cpu_env);
    tcg_gen_mov_i32(tmp, cpu_SR[par[0]]);
    gen_helper_wsr_ccount(cpu_env, arg[0].in);
    tcg_gen_mov_i32(arg[0].out, tmp);
    tcg_temp_free(tmp);

#endif
}
2893
/*
 * Build an XSR translator on top of an existing WSR translator: latch the
 * old SR value (zero when the SR has no backing state), perform the write
 * via translate_wsr_<name>, then return the old value in arg[0].
 */
#define gen_translate_xsr(name) \
    static void translate_xsr_##name(DisasContext *dc, const OpcodeArg arg[], \
                                     const uint32_t par[]) \
    { \
        TCGv_i32 tmp = tcg_temp_new_i32(); \
 \
        if (sr_name[par[0]]) { \
            tcg_gen_mov_i32(tmp, cpu_SR[par[0]]); \
        } else { \
            tcg_gen_movi_i32(tmp, 0); \
        } \
        translate_wsr_##name(dc, arg, par); \
        tcg_gen_mov_i32(arg[0].out, tmp); \
        tcg_temp_free(tmp); \
    }

gen_translate_xsr(acchi)
gen_translate_xsr(ccompare)
gen_translate_xsr(dbreaka)
gen_translate_xsr(dbreakc)
gen_translate_xsr(ibreaka)
gen_translate_xsr(ibreakenable)
gen_translate_xsr(icount)
gen_translate_xsr(memctl)
gen_translate_xsr(mpuenb)
gen_translate_xsr(ps)
gen_translate_xsr(rasid)
gen_translate_xsr(sar)
gen_translate_xsr(windowbase)
gen_translate_xsr(windowstart)
2924
2925 #undef gen_translate_xsr
2926
2927 static const XtensaOpcodeOps core_ops[] = {
2928 {
2929 .name = "abs",
2930 .translate = translate_abs,
2931 }, {
2932 .name = (const char * const[]) {
2933 "add", "add.n", NULL,
2934 },
2935 .translate = translate_add,
2936 .op_flags = XTENSA_OP_NAME_ARRAY,
2937 }, {
2938 .name = (const char * const[]) {
2939 "addi", "addi.n", NULL,
2940 },
2941 .translate = translate_addi,
2942 .op_flags = XTENSA_OP_NAME_ARRAY,
2943 }, {
2944 .name = "addmi",
2945 .translate = translate_addi,
2946 }, {
2947 .name = "addx2",
2948 .translate = translate_addx,
2949 .par = (const uint32_t[]){1},
2950 }, {
2951 .name = "addx4",
2952 .translate = translate_addx,
2953 .par = (const uint32_t[]){2},
2954 }, {
2955 .name = "addx8",
2956 .translate = translate_addx,
2957 .par = (const uint32_t[]){3},
2958 }, {
2959 .name = "all4",
2960 .translate = translate_all,
2961 .par = (const uint32_t[]){true, 4},
2962 }, {
2963 .name = "all8",
2964 .translate = translate_all,
2965 .par = (const uint32_t[]){true, 8},
2966 }, {
2967 .name = "and",
2968 .translate = translate_and,
2969 }, {
2970 .name = "andb",
2971 .translate = translate_boolean,
2972 .par = (const uint32_t[]){BOOLEAN_AND},
2973 }, {
2974 .name = "andbc",
2975 .translate = translate_boolean,
2976 .par = (const uint32_t[]){BOOLEAN_ANDC},
2977 }, {
2978 .name = "any4",
2979 .translate = translate_all,
2980 .par = (const uint32_t[]){false, 4},
2981 }, {
2982 .name = "any8",
2983 .translate = translate_all,
2984 .par = (const uint32_t[]){false, 8},
2985 }, {
2986 .name = (const char * const[]) {
2987 "ball", "ball.w15", "ball.w18", NULL,
2988 },
2989 .translate = translate_ball,
2990 .par = (const uint32_t[]){TCG_COND_EQ},
2991 .op_flags = XTENSA_OP_NAME_ARRAY,
2992 }, {
2993 .name = (const char * const[]) {
2994 "bany", "bany.w15", "bany.w18", NULL,
2995 },
2996 .translate = translate_bany,
2997 .par = (const uint32_t[]){TCG_COND_NE},
2998 .op_flags = XTENSA_OP_NAME_ARRAY,
2999 }, {
3000 .name = (const char * const[]) {
3001 "bbc", "bbc.w15", "bbc.w18", NULL,
3002 },
3003 .translate = translate_bb,
3004 .par = (const uint32_t[]){TCG_COND_EQ},
3005 .op_flags = XTENSA_OP_NAME_ARRAY,
3006 }, {
3007 .name = (const char * const[]) {
3008 "bbci", "bbci.w15", "bbci.w18", NULL,
3009 },
3010 .translate = translate_bbi,
3011 .par = (const uint32_t[]){TCG_COND_EQ},
3012 .op_flags = XTENSA_OP_NAME_ARRAY,
3013 }, {
3014 .name = (const char * const[]) {
3015 "bbs", "bbs.w15", "bbs.w18", NULL,
3016 },
3017 .translate = translate_bb,
3018 .par = (const uint32_t[]){TCG_COND_NE},
3019 .op_flags = XTENSA_OP_NAME_ARRAY,
3020 }, {
3021 .name = (const char * const[]) {
3022 "bbsi", "bbsi.w15", "bbsi.w18", NULL,
3023 },
3024 .translate = translate_bbi,
3025 .par = (const uint32_t[]){TCG_COND_NE},
3026 .op_flags = XTENSA_OP_NAME_ARRAY,
3027 }, {
3028 .name = (const char * const[]) {
3029 "beq", "beq.w15", "beq.w18", NULL,
3030 },
3031 .translate = translate_b,
3032 .par = (const uint32_t[]){TCG_COND_EQ},
3033 .op_flags = XTENSA_OP_NAME_ARRAY,
3034 }, {
3035 .name = (const char * const[]) {
3036 "beqi", "beqi.w15", "beqi.w18", NULL,
3037 },
3038 .translate = translate_bi,
3039 .par = (const uint32_t[]){TCG_COND_EQ},
3040 .op_flags = XTENSA_OP_NAME_ARRAY,
3041 }, {
3042 .name = (const char * const[]) {
3043 "beqz", "beqz.n", "beqz.w15", "beqz.w18", NULL,
3044 },
3045 .translate = translate_bz,
3046 .par = (const uint32_t[]){TCG_COND_EQ},
3047 .op_flags = XTENSA_OP_NAME_ARRAY,
3048 }, {
3049 .name = "bf",
3050 .translate = translate_bp,
3051 .par = (const uint32_t[]){TCG_COND_EQ},
3052 }, {
3053 .name = (const char * const[]) {
3054 "bge", "bge.w15", "bge.w18", NULL,
3055 },
3056 .translate = translate_b,
3057 .par = (const uint32_t[]){TCG_COND_GE},
3058 .op_flags = XTENSA_OP_NAME_ARRAY,
3059 }, {
3060 .name = (const char * const[]) {
3061 "bgei", "bgei.w15", "bgei.w18", NULL,
3062 },
3063 .translate = translate_bi,
3064 .par = (const uint32_t[]){TCG_COND_GE},
3065 .op_flags = XTENSA_OP_NAME_ARRAY,
3066 }, {
3067 .name = (const char * const[]) {
3068 "bgeu", "bgeu.w15", "bgeu.w18", NULL,
3069 },
3070 .translate = translate_b,
3071 .par = (const uint32_t[]){TCG_COND_GEU},
3072 .op_flags = XTENSA_OP_NAME_ARRAY,
3073 }, {
3074 .name = (const char * const[]) {
3075 "bgeui", "bgeui.w15", "bgeui.w18", NULL,
3076 },
3077 .translate = translate_bi,
3078 .par = (const uint32_t[]){TCG_COND_GEU},
3079 .op_flags = XTENSA_OP_NAME_ARRAY,
3080 }, {
3081 .name = (const char * const[]) {
3082 "bgez", "bgez.w15", "bgez.w18", NULL,
3083 },
3084 .translate = translate_bz,
3085 .par = (const uint32_t[]){TCG_COND_GE},
3086 .op_flags = XTENSA_OP_NAME_ARRAY,
3087 }, {
3088 .name = (const char * const[]) {
3089 "blt", "blt.w15", "blt.w18", NULL,
3090 },
3091 .translate = translate_b,
3092 .par = (const uint32_t[]){TCG_COND_LT},
3093 .op_flags = XTENSA_OP_NAME_ARRAY,
3094 }, {
3095 .name = (const char * const[]) {
3096 "blti", "blti.w15", "blti.w18", NULL,
3097 },
3098 .translate = translate_bi,
3099 .par = (const uint32_t[]){TCG_COND_LT},
3100 .op_flags = XTENSA_OP_NAME_ARRAY,
3101 }, {
3102 .name = (const char * const[]) {
3103 "bltu", "bltu.w15", "bltu.w18", NULL,
3104 },
3105 .translate = translate_b,
3106 .par = (const uint32_t[]){TCG_COND_LTU},
3107 .op_flags = XTENSA_OP_NAME_ARRAY,
3108 }, {
3109 .name = (const char * const[]) {
3110 "bltui", "bltui.w15", "bltui.w18", NULL,
3111 },
3112 .translate = translate_bi,
3113 .par = (const uint32_t[]){TCG_COND_LTU},
3114 .op_flags = XTENSA_OP_NAME_ARRAY,
3115 }, {