acl: fix memory leak
[qemu.git] / target-xtensa / translate.c
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <stdio.h>
32
33 #include "cpu.h"
34 #include "exec/exec-all.h"
35 #include "disas/disas.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "sysemu/sysemu.h"
39 #include "exec/cpu_ldst.h"
40
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43
44 #include "trace-tcg.h"
45
46
/*
 * Per-translation-block disassembly state, threaded through all of the
 * gen_* helpers while one TB is being translated.
 */
typedef struct DisasContext {
    const XtensaConfig *config;     /* core configuration being translated */
    TranslationBlock *tb;
    uint32_t pc;                    /* address of the current instruction */
    uint32_t next_pc;               /* address of the following instruction */
    int cring;                      /* effective ring (0 when PS.EXCM set) */
    int ring;                       /* ring from PS.RING */
    uint32_t lbeg;                  /* zero-overhead loop begin/end, */
    uint32_t lend;                  /*   snapshot for loop-end checks */
    TCGv_i32 litbase;               /* cached masked LITBASE (see init_litbase) */
    int is_jmp;                     /* DISAS_* status of the current TB */
    int singlestep_enabled;

    /* SAR tracking: which cached form of SAR is currently valid */
    bool sar_5bit;                  /* SAR known to be a 5-bit right-shift amount */
    bool sar_m32_5bit;              /* sar_m32 holds the 5-bit left-shift amount */
    bool sar_m32_allocated;         /* sar_m32 temp has been allocated */
    TCGv_i32 sar_m32;

    uint32_t ccount_delta;          /* cycles not yet added to CCOUNT */
    unsigned used_window;           /* highest AR quad checked so far (see
                                       gen_window_check1) */

    bool debug;                     /* debug option enabled for this TB */
    bool icount;                    /* ICOUNT tracking active for this TB */
    TCGv_i32 next_icount;

    unsigned cpenable;              /* snapshot of CPENABLE for this TB */
} DisasContext;
74
/*
 * TCG globals mirroring CPUXtensaState fields; allocated once in
 * xtensa_translate_init().  NULL entries in cpu_SR/cpu_UR correspond to
 * registers absent from sregnames/uregnames.
 */
static TCGv_ptr cpu_env;
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];      /* AR registers (current window), regs[] */
static TCGv_i32 cpu_FR[16];     /* FP registers, fregs[] */
static TCGv_i32 cpu_SR[256];    /* special registers, sregs[] */
static TCGv_i32 cpu_UR[256];    /* user registers, uregs[] */
81
82 #include "exec/gen-icount.h"
83
/*
 * Descriptor for one special/user register: its name, the configuration
 * option bit(s) that must be enabled for it to exist, and the permitted
 * access modes (bitwise OR of SR_R/SR_W/SR_X).
 */
typedef struct XtensaReg {
    const char *name;
    uint64_t opt_bits;      /* XTENSA_OPTION_BIT() mask gating the register */
    enum {
        SR_R = 1,           /* readable via RSR */
        SR_W = 2,           /* writable via WSR */
        SR_X = 4,           /* exchangeable via XSR */
        SR_RW = 3,
        SR_RWX = 7,
    } access;
} XtensaReg;
95
/* Register present when a single option 'opt' is enabled, access mask 'acc'. */
#define XTENSA_REG_ACCESS(regname, opt, acc) { \
        .name = (regname), \
        .opt_bits = XTENSA_OPTION_BIT(opt), \
        .access = (acc), \
    }

/* Same, with full RSR/WSR/XSR access. */
#define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX)

/* Register gated by an explicit option bitmask rather than one option. */
#define XTENSA_REG_BITS_ACCESS(regname, opt, acc) { \
        .name = (regname), \
        .opt_bits = (opt), \
        .access = (acc), \
    }

#define XTENSA_REG_BITS(regname, opt) \
    XTENSA_REG_BITS_ACCESS(regname, opt, SR_RWX)
112
/*
 * Special register table, indexed by SR number.  Zero-initialized entries
 * (name == NULL) are unimplemented; gen_check_sr() consults opt_bits and
 * access before any RSR/WSR/XSR is translated.
 */
static const XtensaReg sregnames[256] = {
    [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP),
    [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP),
    [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP),
    [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL),
    [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN),
    [LITBASE] = XTENSA_REG("LITBASE", XTENSA_OPTION_EXTENDED_L32R),
    [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE),
    [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16),
    [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16),
    [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16),
    [MR + 1] = XTENSA_REG("MR1", XTENSA_OPTION_MAC16),
    [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16),
    [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16),
    [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER),
    [WINDOW_START] = XTENSA_REG("WINDOW_START",
            XTENSA_OPTION_WINDOWED_REGISTER),
    [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU),
    [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU),
    [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU),
    [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU),
    [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG),
    [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR),
    [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL),
    [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG),
    [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG),
    [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG),
    [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG),
    [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG),
    [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG),
    [CONFIGID0] = XTENSA_REG_BITS_ACCESS("CONFIGID0", XTENSA_OPTION_ALL, SR_R),
    [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION),
    [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 5] = XTENSA_REG("EPC6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION),
    [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [CONFIGID1] = XTENSA_REG_BITS_ACCESS("CONFIGID1", XTENSA_OPTION_ALL, SR_R),
    [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION),
    [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR),
    [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW),
    [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W),
    [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT),
    [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL),
    [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR),
    [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION),
    [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R),
    [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT),
    [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R),
    [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG),
    [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG),
    [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION),
    [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT),
    [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1",
            XTENSA_OPTION_TIMER_INTERRUPT),
    [CCOMPARE + 2] = XTENSA_REG("CCOMPARE2",
            XTENSA_OPTION_TIMER_INTERRUPT),
    [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR),
    [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR),
    [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR),
    [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR),
};
195
/* User register table, indexed by UR number; same conventions as sregnames. */
static const XtensaReg uregnames[256] = {
    [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER),
    [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR),
    [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR),
};
201
202 void xtensa_translate_init(void)
203 {
204 static const char * const regnames[] = {
205 "ar0", "ar1", "ar2", "ar3",
206 "ar4", "ar5", "ar6", "ar7",
207 "ar8", "ar9", "ar10", "ar11",
208 "ar12", "ar13", "ar14", "ar15",
209 };
210 static const char * const fregnames[] = {
211 "f0", "f1", "f2", "f3",
212 "f4", "f5", "f6", "f7",
213 "f8", "f9", "f10", "f11",
214 "f12", "f13", "f14", "f15",
215 };
216 int i;
217
218 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
219 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
220 offsetof(CPUXtensaState, pc), "pc");
221
222 for (i = 0; i < 16; i++) {
223 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
224 offsetof(CPUXtensaState, regs[i]),
225 regnames[i]);
226 }
227
228 for (i = 0; i < 16; i++) {
229 cpu_FR[i] = tcg_global_mem_new_i32(TCG_AREG0,
230 offsetof(CPUXtensaState, fregs[i]),
231 fregnames[i]);
232 }
233
234 for (i = 0; i < 256; ++i) {
235 if (sregnames[i].name) {
236 cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
237 offsetof(CPUXtensaState, sregs[i]),
238 sregnames[i].name);
239 }
240 }
241
242 for (i = 0; i < 256; ++i) {
243 if (uregnames[i].name) {
244 cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
245 offsetof(CPUXtensaState, uregs[i]),
246 uregnames[i].name);
247 }
248 }
249 }
250
/* True iff all option bits in 'opt' are enabled in this core's config. */
static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
{
    return xtensa_option_bits_enabled(dc->config, opt);
}
255
/* True iff the single configuration option 'opt' is enabled. */
static inline bool option_enabled(DisasContext *dc, int opt)
{
    return xtensa_option_enabled(dc->config, opt);
}
260
261 static void init_litbase(DisasContext *dc)
262 {
263 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
264 dc->litbase = tcg_temp_local_new_i32();
265 tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
266 }
267 }
268
269 static void reset_litbase(DisasContext *dc)
270 {
271 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
272 tcg_temp_free(dc->litbase);
273 }
274 }
275
276 static void init_sar_tracker(DisasContext *dc)
277 {
278 dc->sar_5bit = false;
279 dc->sar_m32_5bit = false;
280 dc->sar_m32_allocated = false;
281 }
282
283 static void reset_sar_tracker(DisasContext *dc)
284 {
285 if (dc->sar_m32_allocated) {
286 tcg_temp_free(dc->sar_m32);
287 }
288 }
289
/*
 * SSR/SSA8L-style SAR setup: SAR = sa & 0x1f (right-shift amount).
 * Marks SAR as a known 5-bit value so shift translation can take the
 * fast path; invalidates any cached left-shift complement.
 */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        /* drop the stale cached 32-sa value */
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}
299
300 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
301 {
302 TCGv_i32 tmp = tcg_const_i32(32);
303 if (!dc->sar_m32_allocated) {
304 dc->sar_m32 = tcg_temp_local_new_i32();
305 dc->sar_m32_allocated = true;
306 }
307 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
308 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
309 dc->sar_5bit = false;
310 dc->sar_m32_5bit = true;
311 tcg_temp_free(tmp);
312 }
313
314 static void gen_advance_ccount_cond(DisasContext *dc)
315 {
316 if (dc->ccount_delta > 0) {
317 TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
318 gen_helper_advance_ccount(cpu_env, tmp);
319 tcg_temp_free(tmp);
320 }
321 }
322
/* Flush the pending cycle delta into CCOUNT and reset the counter. */
static void gen_advance_ccount(DisasContext *dc)
{
    gen_advance_ccount_cond(dc);
    dc->ccount_delta = 0;
}
328
/* Invalidate the window-check cache; the next register access will
   re-emit an overflow check (see gen_window_check1). */
static void reset_used_window(DisasContext *dc)
{
    dc->used_window = 0;
}
333
334 static void gen_exception(DisasContext *dc, int excp)
335 {
336 TCGv_i32 tmp = tcg_const_i32(excp);
337 gen_advance_ccount(dc);
338 gen_helper_exception(cpu_env, tmp);
339 tcg_temp_free(tmp);
340 }
341
/*
 * Raise an architectural exception with the given EXCCAUSE value at the
 * current PC.  For causes that always end translation (illegal insn,
 * syscall) the TB is terminated.
 */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
            cause == SYSCALL_CAUSE) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
355
356 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
357 TCGv_i32 vaddr)
358 {
359 TCGv_i32 tpc = tcg_const_i32(dc->pc);
360 TCGv_i32 tcause = tcg_const_i32(cause);
361 gen_advance_ccount(dc);
362 gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
363 tcg_temp_free(tpc);
364 tcg_temp_free(tcause);
365 }
366
/*
 * Raise a debug exception with the given DEBUGCAUSE bits.  Causes that
 * correspond to code breakpoints (IB/BI/BN) terminate the TB.
 */
static void gen_debug_exception(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_debug_exception(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
379
/* Raise a privileged-instruction exception unless running in ring 0
   (dc->cring == 0).  Terminates the TB when the exception fires. */
static void gen_check_privilege(DisasContext *dc)
{
    if (dc->cring) {
        gen_exception_cause(dc, PRIVILEGED_CAUSE);
        dc->is_jmp = DISAS_UPDATE;
    }
}
387
/* Raise a coprocessor-disabled exception if coprocessor 'cp' is not
   enabled in the cached CPENABLE snapshot. */
static void gen_check_cpenable(DisasContext *dc, unsigned cp)
{
    if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
            !(dc->cpenable & (1 << cp))) {
        gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
        dc->is_jmp = DISAS_UPDATE;
    }
}
396
/*
 * Emit the end-of-TB jump to 'dest'.  'slot' >= 0 selects a goto_tb
 * chaining slot (direct jump within the same page); slot == -1 forces a
 * full exit so the main loop re-looks-up the next TB.  Under single-step
 * an EXCP_DEBUG exception is raised instead of exiting normally.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    gen_advance_ccount(dc);
    if (dc->icount) {
        /* commit the per-TB instruction count back to ICOUNT */
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }
    if (dc->singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb((uintptr_t)dc->tb + slot);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
    dc->is_jmp = DISAS_UPDATE;
}
416
/* Indirect jump: no TB chaining possible, always exit with slot -1. */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
421
422 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
423 {
424 TCGv_i32 tmp = tcg_const_i32(dest);
425 if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
426 slot = -1;
427 }
428 gen_jump_slot(dc, tmp, slot);
429 tcg_temp_free(tmp);
430 }
431
/*
 * Windowed call: record the window increment in PS.CALLINC, store the
 * return address (with the increment encoded in bits 31:30) into the
 * callee-visible return register a{4*callinc} (a4/a8/a12), then jump.
 */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
        int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
            tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    tcg_gen_movi_i32(cpu_R[callinc << 2],
            (callinc << 30) | (dc->next_pc & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}
444
/* Indirect windowed call: no TB chaining, slot -1. */
static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
{
    gen_callw_slot(dc, callinc, dest, -1);
}
449
450 static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
451 {
452 TCGv_i32 tmp = tcg_const_i32(dest);
453 if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
454 slot = -1;
455 }
456 gen_callw_slot(dc, callinc, tmp, slot);
457 tcg_temp_free(tmp);
458 }
459
/*
 * Zero-overhead loop handling: when the next PC equals LEND (and the loop
 * option is active and we are not in exception mode), emit code that
 * either decrements LCOUNT and branches back to LBEG, or falls through.
 * Returns true when loop-end code was emitted (TB is finished).
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
            !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
            dc->next_pc == dc->lend) {
        int label = gen_new_label();

        gen_advance_ccount(dc);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        gen_jumpi(dc, dc->lbeg, slot);
        gen_set_label(label);
        /* LCOUNT exhausted: leave the loop */
        gen_jumpi(dc, dc->next_pc, -1);
        return true;
    }
    return false;
}
477
478 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
479 {
480 if (!gen_check_loop_end(dc, slot)) {
481 gen_jumpi(dc, dc->next_pc, slot);
482 }
483 }
484
/*
 * Conditional branch: if (t0 cond t1) jump to pc+offset (chain slot 1),
 * otherwise fall through to next_pc (chain slot 0, with loop-end check).
 */
static void gen_brcond(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
{
    int label = gen_new_label();

    gen_advance_ccount(dc);
    tcg_gen_brcond_i32(cond, t0, t1, label);
    /* not-taken path */
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    /* taken path */
    gen_jumpi(dc, dc->pc + offset, 1);
}
496
497 static void gen_brcondi(DisasContext *dc, TCGCond cond,
498 TCGv_i32 t0, uint32_t t1, uint32_t offset)
499 {
500 TCGv_i32 tmp = tcg_const_i32(t1);
501 gen_brcond(dc, cond, t0, tmp, offset);
502 tcg_temp_free(tmp);
503 }
504
/*
 * Validate an RSR/WSR/XSR access to special register 'sr': the register
 * must exist in this core's configuration and permit the requested
 * access mode.  On failure logs, raises an illegal-instruction exception
 * and returns false; the caller must then skip the access.
 */
static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
{
    if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
        /* distinguish "known SR, not configured" from "unknown SR" */
        if (sregnames[sr].name) {
            qemu_log("SR %s is not configured\n", sregnames[sr].name);
        } else {
            qemu_log("SR %d is not implemented\n", sr);
        }
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return false;
    } else if (!(sregnames[sr].access & access)) {
        static const char * const access_text[] = {
            [SR_R] = "rsr",
            [SR_W] = "wsr",
            [SR_X] = "xsr",
        };
        assert(access < ARRAY_SIZE(access_text) && access_text[access]);
        qemu_log("SR %s is not available for %s\n", sregnames[sr].name,
                access_text[access]);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return false;
    }
    return true;
}
529
/* RSR CCOUNT: flush pending cycles first so the read is up to date. */
static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    gen_advance_ccount(dc);
    tcg_gen_mov_i32(d, cpu_SR[sr]);
}
535
/* RSR PTEVADDR: combine the stored page-table base with the VPN derived
   from EXCVADDR (>> 10), clearing the low two bits. */
static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(d, d, cpu_SR[sr]);
    tcg_gen_andi_i32(d, d, 0xfffffffc);
}
542
/*
 * RSR dispatch: registers needing side effects on read go through their
 * handler; everything else is a plain move from the SR global.
 */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
            TCGv_i32 d, uint32_t sr) = {
        [CCOUNT] = gen_rsr_ccount,
        [PTEVADDR] = gen_rsr_ptevaddr,
    };

    if (rsr_handler[sr]) {
        rsr_handler[sr](dc, d, sr);
    } else {
        tcg_gen_mov_i32(d, cpu_SR[sr]);
    }
}
557
/* WSR LBEG: helper updates loop state; end the TB since cached
   lbeg/lend in the DisasContext may now be stale. */
static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lbeg(cpu_env, s);
    gen_jumpi_check_loop_end(dc, 0);
}
563
/* WSR LEND: same staleness concern as gen_wsr_lbeg(). */
static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lend(cpu_env, s);
    gen_jumpi_check_loop_end(dc, 0);
}
569
/* WSR SAR: keep only 6 bits; invalidate both cached SAR forms since the
   written value is arbitrary. */
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
579
/* WSR BR: only 16 boolean flags exist. */
static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
}
584
/* WSR LITBASE: keep the 20-bit base and the enable bit (bit 0). */
static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
591
/* WSR ACCHI: the MAC16 accumulator high part is 8 bits, sign-extended. */
static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_ext8s_i32(cpu_SR[sr], s);
}
596
/* WSR WINDOW_BASE: helper rotates the register file; cached window-check
   state is no longer valid. */
static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_windowbase(cpu_env, v);
    reset_used_window(dc);
}
602
/* WSR WINDOW_START: one valid bit per 4-register window frame. */
static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
    reset_used_window(dc);
}
608
/* WSR PTEVADDR: only the page-table base bits (31:22) are writable. */
static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
}
613
/* WSR RASID: helper updates the MMU ASID mapping. */
static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_rasid(cpu_env, v);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
620
/* WSR ITLBCFG/DTLBCFG: mask to the implemented way-configuration bits. */
static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
}
625
/* WSR IBREAKENABLE: helper re-applies instruction breakpoints. */
static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_ibreakenable(cpu_env, v);
    gen_jumpi_check_loop_end(dc, 0);
}
631
/* WSR ATOMCTL: 6 implemented control bits. */
static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f);
}
636
637 static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
638 {
639 unsigned id = sr - IBREAKA;
640
641 if (id < dc->config->nibreak) {
642 TCGv_i32 tmp = tcg_const_i32(id);
643 gen_helper_wsr_ibreaka(cpu_env, tmp, v);
644 tcg_temp_free(tmp);
645 gen_jumpi_check_loop_end(dc, 0);
646 }
647 }
648
649 static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
650 {
651 unsigned id = sr - DBREAKA;
652
653 if (id < dc->config->ndbreak) {
654 TCGv_i32 tmp = tcg_const_i32(id);
655 gen_helper_wsr_dbreaka(cpu_env, tmp, v);
656 tcg_temp_free(tmp);
657 }
658 }
659
660 static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
661 {
662 unsigned id = sr - DBREAKC;
663
664 if (id < dc->config->ndbreak) {
665 TCGv_i32 tmp = tcg_const_i32(id);
666 gen_helper_wsr_dbreakc(cpu_env, tmp, v);
667 tcg_temp_free(tmp);
668 }
669 }
670
/* WSR CPENABLE: 8 coprocessor enable bits. */
static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
677
/* WSR INTSET: only software interrupts can be set directly; re-evaluate
   pending interrupts and finish the TB. */
static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v,
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
685
/* WSR INTCLEAR: clear the written edge/NMI/software interrupt bits from
   INTSET, then re-evaluate pending interrupts. */
static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, v,
            dc->config->inttype_mask[INTTYPE_EDGE] |
            dc->config->inttype_mask[INTTYPE_NMI] |
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
    tcg_temp_free(tmp);
    gen_helper_check_interrupts(cpu_env);
}
698
/* WSR INTENABLE: newly enabled interrupts may be pending, so check and
   finish the TB. */
static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_SR[sr], v);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
705
/* WSR PS: mask to the implemented PS fields (RING only with MMU),
   invalidate window-check state, re-check interrupts, and exit the TB
   since PS affects the MMU index and tb->flags. */
static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[sr], v, mask);
    reset_used_window(dc);
    gen_helper_check_interrupts(cpu_env);
    /* This can change mmu index and tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
720
/* WSR ICOUNT: while ICOUNT tracking is active the running value lives in
   next_icount and is committed at TB exit; otherwise write directly. */
static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    if (dc->icount) {
        tcg_gen_mov_i32(dc->next_icount, v);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], v);
    }
}
729
/* WSR ICOUNTLEVEL: 4-bit level. */
static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
736
/* WSR CCOMPARE[n]: writing clears the corresponding timer interrupt bit
   in INTSET, so re-check pending interrupts. */
static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t id = sr - CCOMPARE;
    if (id < dc->config->nccompare) {
        uint32_t int_bit = 1 << dc->config->timerint[id];
        gen_advance_ccount(dc);
        tcg_gen_mov_i32(cpu_SR[sr], v);
        tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
        gen_helper_check_interrupts(cpu_env);
    }
}
748
/*
 * WSR dispatch: registers with side effects or write masks go through
 * their handler; everything else is a plain move into the SR global.
 */
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
            uint32_t sr, TCGv_i32 v) = {
        [LBEG] = gen_wsr_lbeg,
        [LEND] = gen_wsr_lend,
        [SAR] = gen_wsr_sar,
        [BR] = gen_wsr_br,
        [LITBASE] = gen_wsr_litbase,
        [ACCHI] = gen_wsr_acchi,
        [WINDOW_BASE] = gen_wsr_windowbase,
        [WINDOW_START] = gen_wsr_windowstart,
        [PTEVADDR] = gen_wsr_ptevaddr,
        [RASID] = gen_wsr_rasid,
        [ITLBCFG] = gen_wsr_tlbcfg,
        [DTLBCFG] = gen_wsr_tlbcfg,
        [IBREAKENABLE] = gen_wsr_ibreakenable,
        [ATOMCTL] = gen_wsr_atomctl,
        [IBREAKA] = gen_wsr_ibreaka,
        [IBREAKA + 1] = gen_wsr_ibreaka,
        [DBREAKA] = gen_wsr_dbreaka,
        [DBREAKA + 1] = gen_wsr_dbreaka,
        [DBREAKC] = gen_wsr_dbreakc,
        [DBREAKC + 1] = gen_wsr_dbreakc,
        [CPENABLE] = gen_wsr_cpenable,
        [INTSET] = gen_wsr_intset,
        [INTCLEAR] = gen_wsr_intclear,
        [INTENABLE] = gen_wsr_intenable,
        [PS] = gen_wsr_ps,
        [ICOUNT] = gen_wsr_icount,
        [ICOUNTLEVEL] = gen_wsr_icountlevel,
        [CCOMPARE] = gen_wsr_ccompare,
        [CCOMPARE + 1] = gen_wsr_ccompare,
        [CCOMPARE + 2] = gen_wsr_ccompare,
    };

    if (wsr_handler[sr]) {
        wsr_handler[sr](dc, sr, s);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], s);
    }
}
791
/*
 * WUR dispatch: FCR goes through a helper, FSR masks the reserved low
 * bits, everything else is a plain move into the UR global.
 */
static void gen_wur(uint32_t ur, TCGv_i32 s)
{
    switch (ur) {
    case FCR:
        gen_helper_wur_fcr(cpu_env, s);
        break;

    case FSR:
        tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
        break;

    default:
        tcg_gen_mov_i32(cpu_UR[ur], s);
        break;
    }
}
808
/*
 * Handle a (1 << shift)-byte access at 'addr' per the core's alignment
 * configuration: without the unaligned exception option the address is
 * silently force-aligned; with hardware alignment available but not
 * applicable (no_hw_alignment), a misaligned address raises a
 * load/store-alignment exception.  May modify 'addr' in place.
 */
static void gen_load_store_alignment(DisasContext *dc, int shift,
        TCGv_i32 addr, bool no_hw_alignment)
{
    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
        tcg_gen_andi_i32(addr, addr, ~0 << shift);
    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
            no_hw_alignment) {
        int label = gen_new_label();
        TCGv_i32 tmp = tcg_temp_new_i32();
        /* low 'shift' bits must be zero */
        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
        gen_set_label(label);
        tcg_temp_free(tmp);
    }
}
825
826 static void gen_waiti(DisasContext *dc, uint32_t imm4)
827 {
828 TCGv_i32 pc = tcg_const_i32(dc->next_pc);
829 TCGv_i32 intlevel = tcg_const_i32(imm4);
830 gen_advance_ccount(dc);
831 gen_helper_waiti(cpu_env, pc, intlevel);
832 tcg_temp_free(pc);
833 tcg_temp_free(intlevel);
834 }
835
/*
 * Emit a window-overflow check before accessing AR register r1.  Skipped
 * in exception mode or when a check covering this register quad was
 * already emitted in this TB (used_window caches the highest quad
 * checked).  The check doubles WINDOW_START (so the rotation below can
 * wrap), rotates it down by WINDOW_BASE, and calls the overflow helper
 * if any frame between the current one and r1's quad is live.
 */
static void gen_window_check1(DisasContext *dc, unsigned r1)
{
    if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
        return;
    }
    if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
            r1 / 4 > dc->used_window) {
        int label = gen_new_label();
        TCGv_i32 ws = tcg_temp_new_i32();

        dc->used_window = r1 / 4;
        /* duplicate WINDOW_START into the upper half so the shift by
           WINDOW_BASE below acts as a rotation */
        tcg_gen_deposit_i32(ws, cpu_SR[WINDOW_START], cpu_SR[WINDOW_START],
                dc->config->nareg / 4, dc->config->nareg / 4);
        tcg_gen_shr_i32(ws, ws, cpu_SR[WINDOW_BASE]);
        /* keep only the frames between the current one and r1's quad */
        tcg_gen_andi_i32(ws, ws, (2 << (r1 / 4)) - 2);
        tcg_gen_brcondi_i32(TCG_COND_EQ, ws, 0, label);
        {
            TCGv_i32 pc = tcg_const_i32(dc->pc);
            TCGv_i32 w = tcg_const_i32(r1 / 4);

            gen_advance_ccount_cond(dc);
            gen_helper_window_check(cpu_env, pc, w);

            tcg_temp_free(w);
            tcg_temp_free(pc);
        }
        gen_set_label(label);
        tcg_temp_free(ws);
    }
}
866
867 static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
868 {
869 gen_window_check1(dc, r1 > r2 ? r1 : r2);
870 }
871
872 static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
873 unsigned r3)
874 {
875 gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
876 }
877
878 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
879 {
880 TCGv_i32 m = tcg_temp_new_i32();
881
882 if (hi) {
883 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
884 } else {
885 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
886 }
887 return m;
888 }
889
890 static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
891 {
892 #define HAS_OPTION_BITS(opt) do { \
893 if (!option_bits_enabled(dc, opt)) { \
894 qemu_log("Option is not enabled %s:%d\n", \
895 __FILE__, __LINE__); \
896 goto invalid_opcode; \
897 } \
898 } while (0)
899
900 #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
901
902 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
903 #define RESERVED() do { \
904 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
905 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
906 goto invalid_opcode; \
907 } while (0)
908
909
910 #ifdef TARGET_WORDS_BIGENDIAN
911 #define OP0 (((b0) & 0xf0) >> 4)
912 #define OP1 (((b2) & 0xf0) >> 4)
913 #define OP2 ((b2) & 0xf)
914 #define RRR_R ((b1) & 0xf)
915 #define RRR_S (((b1) & 0xf0) >> 4)
916 #define RRR_T ((b0) & 0xf)
917 #else
918 #define OP0 (((b0) & 0xf))
919 #define OP1 (((b2) & 0xf))
920 #define OP2 (((b2) & 0xf0) >> 4)
921 #define RRR_R (((b1) & 0xf0) >> 4)
922 #define RRR_S (((b1) & 0xf))
923 #define RRR_T (((b0) & 0xf0) >> 4)
924 #endif
925 #define RRR_X ((RRR_R & 0x4) >> 2)
926 #define RRR_Y ((RRR_T & 0x4) >> 2)
927 #define RRR_W (RRR_R & 0x3)
928
929 #define RRRN_R RRR_R
930 #define RRRN_S RRR_S
931 #define RRRN_T RRR_T
932
933 #define RRI4_R RRR_R
934 #define RRI4_S RRR_S
935 #define RRI4_T RRR_T
936 #ifdef TARGET_WORDS_BIGENDIAN
937 #define RRI4_IMM4 ((b2) & 0xf)
938 #else
939 #define RRI4_IMM4 (((b2) & 0xf0) >> 4)
940 #endif
941
942 #define RRI8_R RRR_R
943 #define RRI8_S RRR_S
944 #define RRI8_T RRR_T
945 #define RRI8_IMM8 (b2)
946 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
947
948 #ifdef TARGET_WORDS_BIGENDIAN
949 #define RI16_IMM16 (((b1) << 8) | (b2))
950 #else
951 #define RI16_IMM16 (((b2) << 8) | (b1))
952 #endif
953
954 #ifdef TARGET_WORDS_BIGENDIAN
955 #define CALL_N (((b0) & 0xc) >> 2)
956 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
957 #else
958 #define CALL_N (((b0) & 0x30) >> 4)
959 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
960 #endif
961 #define CALL_OFFSET_SE \
962 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
963
964 #define CALLX_N CALL_N
965 #ifdef TARGET_WORDS_BIGENDIAN
966 #define CALLX_M ((b0) & 0x3)
967 #else
968 #define CALLX_M (((b0) & 0xc0) >> 6)
969 #endif
970 #define CALLX_S RRR_S
971
972 #define BRI12_M CALLX_M
973 #define BRI12_S RRR_S
974 #ifdef TARGET_WORDS_BIGENDIAN
975 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
976 #else
977 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
978 #endif
979 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
980
981 #define BRI8_M BRI12_M
982 #define BRI8_R RRI8_R
983 #define BRI8_S RRI8_S
984 #define BRI8_IMM8 RRI8_IMM8
985 #define BRI8_IMM8_SE RRI8_IMM8_SE
986
987 #define RSR_SR (b1)
988
989 uint8_t b0 = cpu_ldub_code(env, dc->pc);
990 uint8_t b1 = cpu_ldub_code(env, dc->pc + 1);
991 uint8_t b2 = 0;
992
993 static const uint32_t B4CONST[] = {
994 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
995 };
996
997 static const uint32_t B4CONSTU[] = {
998 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
999 };
1000
1001 if (OP0 >= 8) {
1002 dc->next_pc = dc->pc + 2;
1003 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
1004 } else {
1005 dc->next_pc = dc->pc + 3;
1006 b2 = cpu_ldub_code(env, dc->pc + 2);
1007 }
1008
/* Main decode: dispatch on the top-level opcode field OP0.
 * case 0 (QRST) further dispatches on OP1/OP2/RRR_R; the ST0 sub-group
 * below covers control transfer (RET/JX/CALLX), window management,
 * memory barriers, exception/interrupt returns and boolean reductions. */
1009 switch (OP0) {
1010 case 0: /*QRST*/
1011 switch (OP1) {
1012 case 0: /*RST0*/
1013 switch (OP2) {
1014 case 0: /*ST0*/
1015 if ((RRR_R & 0xc) == 0x8) {
1016 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1017 }
1018 
1019 switch (RRR_R) {
1020 case 0: /*SNM0*/
1021 switch (CALLX_M) {
1022 case 0: /*ILL*/
1023 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1024 break;
1025 
1026 case 1: /*reserved*/
1027 RESERVED();
1028 break;
1029 
1030 case 2: /*JR*/
1031 switch (CALLX_N) {
1032 case 0: /*RET*/
1033 case 2: /*JX*/
/* RET and JX are both an indirect jump to a[CALLX_S]. */
1034 gen_window_check1(dc, CALLX_S);
1035 gen_jump(dc, cpu_R[CALLX_S]);
1036 break;
1037 
1038 case 1: /*RETWw*/
1039 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1040 {
/* retw helper rotates the window back and returns the
 * target pc in tmp; pass the current pc for exceptions. */
1041 TCGv_i32 tmp = tcg_const_i32(dc->pc);
1042 gen_advance_ccount(dc);
1043 gen_helper_retw(tmp, cpu_env, tmp);
1044 gen_jump(dc, tmp);
1045 tcg_temp_free(tmp);
1046 }
1047 break;
1048 
1049 case 3: /*reserved*/
1050 RESERVED();
1051 break;
1052 }
1053 break;
1054 
1055 case 3: /*CALLX*/
1056 gen_window_check2(dc, CALLX_S, CALLX_N << 2);
1057 switch (CALLX_N) {
1058 case 0: /*CALLX0*/
1059 {
/* Copy the target first: a0 may be the same register as
 * the call target (CALLX_S == 0). */
1060 TCGv_i32 tmp = tcg_temp_new_i32();
1061 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
1062 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
1063 gen_jump(dc, tmp);
1064 tcg_temp_free(tmp);
1065 }
1066 break;
1067 
1068 case 1: /*CALLX4w*/
1069 case 2: /*CALLX8w*/
1070 case 3: /*CALLX12w*/
1071 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1072 {
1073 TCGv_i32 tmp = tcg_temp_new_i32();
1074 
1075 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
1076 gen_callw(dc, CALLX_N, tmp);
1077 tcg_temp_free(tmp);
1078 }
1079 break;
1080 }
1081 break;
1082 }
1083 break;
1084 
1085 case 1: /*MOVSPw*/
1086 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1087 gen_window_check2(dc, RRR_T, RRR_S);
1088 {
/* movsp helper may raise an alloca exception before the move. */
1089 TCGv_i32 pc = tcg_const_i32(dc->pc);
1090 gen_advance_ccount(dc);
1091 gen_helper_movsp(cpu_env, pc);
1092 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
1093 tcg_temp_free(pc);
1094 }
1095 break;
1096 
1097 case 2: /*SYNC*/
/* All barrier instructions are no-ops for this single-threaded
 * TCG model; only option membership is checked. */
1098 switch (RRR_T) {
1099 case 0: /*ISYNC*/
1100 break;
1101 
1102 case 1: /*RSYNC*/
1103 break;
1104 
1105 case 2: /*ESYNC*/
1106 break;
1107 
1108 case 3: /*DSYNC*/
1109 break;
1110 
1111 case 8: /*EXCW*/
1112 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1113 break;
1114 
1115 case 12: /*MEMW*/
1116 break;
1117 
1118 case 13: /*EXTW*/
1119 break;
1120 
1121 case 15: /*NOP*/
1122 break;
1123 
1124 default: /*reserved*/
1125 RESERVED();
1126 break;
1127 }
1128 break;
1129 
1130 case 3: /*RFEIx*/
1131 switch (RRR_T) {
1132 case 0: /*RFETx*/
1133 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1134 switch (RRR_S) {
1135 case 0: /*RFEx*/
/* Return from exception: clear PS.EXCM, re-check pending
 * interrupts, resume at EPC1. */
1136 gen_check_privilege(dc);
1137 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1138 gen_helper_check_interrupts(cpu_env);
1139 gen_jump(dc, cpu_SR[EPC1]);
1140 break;
1141 
1142 case 1: /*RFUEx*/
1143 RESERVED();
1144 break;
1145 
1146 case 2: /*RFDEx*/
1147 gen_check_privilege(dc);
/* Double-exception return address is DEPC when the core
 * has one, EPC1 otherwise. */
1148 gen_jump(dc, cpu_SR[
1149 dc->config->ndepc ? DEPC : EPC1]);
1150 break;
1151 
1152 case 4: /*RFWOw*/
1153 case 5: /*RFWUw*/
1154 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1155 gen_check_privilege(dc);
1156 {
1157 TCGv_i32 tmp = tcg_const_i32(1);
1158 
1159 tcg_gen_andi_i32(
1160 cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1161 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
1162 
/* RFWO clears, RFWU sets, the WINDOW_START bit of the
 * current window. */
1163 if (RRR_S == 4) {
1164 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
1165 cpu_SR[WINDOW_START], tmp);
1166 } else {
1167 tcg_gen_or_i32(cpu_SR[WINDOW_START],
1168 cpu_SR[WINDOW_START], tmp);
1169 }
1170 
1171 gen_helper_restore_owb(cpu_env);
1172 gen_helper_check_interrupts(cpu_env);
1173 gen_jump(dc, cpu_SR[EPC1]);
1174 
1175 tcg_temp_free(tmp);
1176 }
1177 break;
1178 
1179 default: /*reserved*/
1180 RESERVED();
1181 break;
1182 }
1183 break;
1184 
1185 case 1: /*RFIx*/
1186 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
1187 if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
/* Restore PS from EPS[level] and resume at EPC[level]. */
1188 gen_check_privilege(dc);
1189 tcg_gen_mov_i32(cpu_SR[PS],
1190 cpu_SR[EPS2 + RRR_S - 2]);
1191 gen_helper_check_interrupts(cpu_env);
1192 gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
1193 } else {
1194 qemu_log("RFI %d is illegal\n", RRR_S);
1195 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1196 }
1197 break;
1198 
1199 case 2: /*RFME*/
1200 TBD();
1201 break;
1202 
1203 default: /*reserved*/
1204 RESERVED();
1205 break;
1206 
1207 }
1208 break;
1209 
1210 case 4: /*BREAKx*/
1211 HAS_OPTION(XTENSA_OPTION_DEBUG);
1212 if (dc->debug) {
1213 gen_debug_exception(dc, DEBUGCAUSE_BI);
1214 }
1215 break;
1216 
1217 case 5: /*SYSCALLx*/
1218 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1219 switch (RRR_S) {
1220 case 0: /*SYSCALLx*/
1221 gen_exception_cause(dc, SYSCALL_CAUSE);
1222 break;
1223 
1224 case 1: /*SIMCALL*/
/* SIMCALL is only meaningful under semihosting; otherwise
 * it is treated as an illegal instruction. */
1225 if (semihosting_enabled) {
1226 gen_check_privilege(dc);
1227 gen_helper_simcall(cpu_env);
1228 } else {
1229 qemu_log("SIMCALL but semihosting is disabled\n");
1230 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1231 }
1232 break;
1233 
1234 default:
1235 RESERVED();
1236 break;
1237 }
1238 break;
1239 
1240 case 6: /*RSILx*/
/* RSIL: old PS -> a[t], then set PS.INTLEVEL = s atomically. */
1241 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1242 gen_check_privilege(dc);
1243 gen_window_check1(dc, RRR_T);
1244 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1245 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1246 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1247 gen_helper_check_interrupts(cpu_env);
1248 gen_jumpi_check_loop_end(dc, 0);
1249 break;
1250 
1251 case 7: /*WAITIx*/
1252 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1253 gen_check_privilege(dc);
1254 gen_waiti(dc, RRR_S);
1255 break;
1256 
1257 case 8: /*ANY4p*/
1258 case 9: /*ALL4p*/
1259 case 10: /*ANY8p*/
1260 case 11: /*ALL8p*/
1261 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1262 {
/* Reduce a group of 4 or 8 boolean bits starting at BR[s].
 * ANY: adding the mask carries out iff any bit is set.
 * ALL: adding 1 carries out iff all bits are set.
 * The carry lands at bit s+shift and is deposited into BR[t]. */
1263 const unsigned shift = (RRR_R & 2) ? 8 : 4;
1264 TCGv_i32 mask = tcg_const_i32(
1265 ((1 << shift) - 1) << RRR_S);
1266 TCGv_i32 tmp = tcg_temp_new_i32();
1267 
1268 tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1269 if (RRR_R & 1) { /*ALL*/
1270 tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1271 } else { /*ANY*/
1272 tcg_gen_add_i32(tmp, tmp, mask);
1273 }
1274 tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1275 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1276 tmp, RRR_T, 1);
1277 tcg_temp_free(mask);
1278 tcg_temp_free(tmp);
1279 }
1280 break;
1281 
1282 default: /*reserved*/
1283 RESERVED();
1284 break;
1285 
1286 }
1287 break;
1288
/* RST0 bitwise ops and the ST1 sub-group (SAR setup, ROTW, NSA/NSAU). */
1289 case 1: /*AND*/
1290 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1291 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1292 break;
1293 
1294 case 2: /*OR*/
1295 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1296 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1297 break;
1298 
1299 case 3: /*XOR*/
1300 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1301 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1302 break;
1303 
1304 case 4: /*ST1*/
1305 switch (RRR_R) {
1306 case 0: /*SSR*/
1307 gen_window_check1(dc, RRR_S);
1308 gen_right_shift_sar(dc, cpu_R[RRR_S]);
1309 break;
1310 
1311 case 1: /*SSL*/
1312 gen_window_check1(dc, RRR_S);
1313 gen_left_shift_sar(dc, cpu_R[RRR_S]);
1314 break;
1315 
1316 case 2: /*SSA8L*/
/* Byte shift amount: SAR = a[s] * 8, right-shift sense. */
1317 gen_window_check1(dc, RRR_S);
1318 {
1319 TCGv_i32 tmp = tcg_temp_new_i32();
1320 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1321 gen_right_shift_sar(dc, tmp);
1322 tcg_temp_free(tmp);
1323 }
1324 break;
1325 
1326 case 3: /*SSA8B*/
1327 gen_window_check1(dc, RRR_S);
1328 {
1329 TCGv_i32 tmp = tcg_temp_new_i32();
1330 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1331 gen_left_shift_sar(dc, tmp);
1332 tcg_temp_free(tmp);
1333 }
1334 break;
1335 
1336 case 4: /*SSAI*/
1337 {
/* Immediate shift amount: 5 bits split across the s field
 * and the low bit of t. */
1338 TCGv_i32 tmp = tcg_const_i32(
1339 RRR_S | ((RRR_T & 1) << 4));
1340 gen_right_shift_sar(dc, tmp);
1341 tcg_temp_free(tmp);
1342 }
1343 break;
1344 
1345 case 6: /*RER*/
1346 TBD();
1347 break;
1348 
1349 case 7: /*WER*/
1350 TBD();
1351 break;
1352 
1353 case 8: /*ROTWw*/
1354 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1355 gen_check_privilege(dc);
1356 {
/* Rotate the register window by t (sign-extended 4-bit). */
1357 TCGv_i32 tmp = tcg_const_i32(
1358 RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1359 gen_helper_rotw(cpu_env, tmp);
1360 tcg_temp_free(tmp);
1361 reset_used_window(dc);
1362 }
1363 break;
1364 
1365 case 14: /*NSAu*/
1366 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1367 gen_window_check2(dc, RRR_S, RRR_T);
1368 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1369 break;
1370 
1371 case 15: /*NSAUu*/
1372 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1373 gen_window_check2(dc, RRR_S, RRR_T);
1374 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1375 break;
1376 
1377 default: /*reserved*/
1378 RESERVED();
1379 break;
1380 }
1381 break;
1382
/* TLB management group: bit 3 of r selects DTLB vs ITLB, low 3 bits
 * select the operation. Write/invalidate ops end the TB because they
 * may change the current memory mapping. */
1383 case 5: /*TLB*/
1384 HAS_OPTION_BITS(
1385 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1386 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1387 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1388 gen_check_privilege(dc);
1389 gen_window_check2(dc, RRR_S, RRR_T);
1390 {
1391 TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1392 
1393 switch (RRR_R & 7) {
1394 case 3: /*RITLB0*/ /*RDTLB0*/
1395 gen_helper_rtlb0(cpu_R[RRR_T],
1396 cpu_env, cpu_R[RRR_S], dtlb);
1397 break;
1398 
1399 case 4: /*IITLB*/ /*IDTLB*/
1400 gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb);
1401 /* This could change memory mapping, so exit tb */
1402 gen_jumpi_check_loop_end(dc, -1);
1403 break;
1404 
1405 case 5: /*PITLB*/ /*PDTLB*/
/* Probe may raise an exception; keep cpu_pc current. */
1406 tcg_gen_movi_i32(cpu_pc, dc->pc);
1407 gen_helper_ptlb(cpu_R[RRR_T],
1408 cpu_env, cpu_R[RRR_S], dtlb);
1409 break;
1410 
1411 case 6: /*WITLB*/ /*WDTLB*/
1412 gen_helper_wtlb(
1413 cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1414 /* This could change memory mapping, so exit tb */
1415 gen_jumpi_check_loop_end(dc, -1);
1416 break;
1417 
1418 case 7: /*RITLB1*/ /*RDTLB1*/
1419 gen_helper_rtlb1(cpu_R[RRR_T],
1420 cpu_env, cpu_R[RRR_S], dtlb);
1421 break;
1422 
1423 default:
/* NOTE(review): dtlb is freed here before RESERVED(); this is
 * only leak/double-free safe if RESERVED() transfers control
 * out of this scope (e.g. goto) — confirm against the macro
 * definition earlier in the file. */
1424 tcg_temp_free(dtlb);
1425 RESERVED();
1426 break;
1427 }
1428 tcg_temp_free(dtlb);
1429 }
1430 break;
1431
/* RT0 (NEG/ABS) and the core integer add/subtract family.
 * ADDX2/4/8 and SUBX2/4/8 shift a[s] left by 1..3 before the add/sub. */
1432 case 6: /*RT0*/
1433 gen_window_check2(dc, RRR_R, RRR_T);
1434 switch (RRR_S) {
1435 case 0: /*NEG*/
1436 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1437 break;
1438 
1439 case 1: /*ABS*/
1440 {
/* abs(x) via movcond: r = (t >= 0) ? t : -t. */
1441 TCGv_i32 zero = tcg_const_i32(0);
1442 TCGv_i32 neg = tcg_temp_new_i32();
1443 
1444 tcg_gen_neg_i32(neg, cpu_R[RRR_T]);
1445 tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R],
1446 cpu_R[RRR_T], zero, cpu_R[RRR_T], neg);
1447 tcg_temp_free(neg);
1448 tcg_temp_free(zero);
1449 }
1450 break;
1451 
1452 default: /*reserved*/
1453 RESERVED();
1454 break;
1455 }
1456 break;
1457 
1458 case 7: /*reserved*/
1459 RESERVED();
1460 break;
1461 
1462 case 8: /*ADD*/
1463 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1464 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1465 break;
1466 
1467 case 9: /*ADD**/
1468 case 10:
1469 case 11:
1470 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1471 {
/* ADDX2/4/8: r = (s << (OP2 - 8)) + t. */
1472 TCGv_i32 tmp = tcg_temp_new_i32();
1473 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1474 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1475 tcg_temp_free(tmp);
1476 }
1477 break;
1478 
1479 case 12: /*SUB*/
1480 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1481 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1482 break;
1483 
1484 case 13: /*SUB**/
1485 case 14:
1486 case 15:
1487 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1488 {
/* SUBX2/4/8: r = (s << (OP2 - 12)) - t. */
1489 TCGv_i32 tmp = tcg_temp_new_i32();
1490 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1491 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1492 tcg_temp_free(tmp);
1493 }
1494 break;
1495 }
1496 break;
1497
/* RST1: immediate shifts, XSR, SAR-based funnel shifts and 16-bit
 * multiplies. */
1498 case 1: /*RST1*/
1499 switch (OP2) {
1500 case 0: /*SLLI*/
1501 case 1:
1502 gen_window_check2(dc, RRR_R, RRR_S);
/* SLLI encodes (32 - shift) in {t, OP2 bit 0}. */
1503 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1504 32 - (RRR_T | ((OP2 & 1) << 4)));
1505 break;
1506 
1507 case 2: /*SRAI*/
1508 case 3:
1509 gen_window_check2(dc, RRR_R, RRR_T);
1510 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1511 RRR_S | ((OP2 & 1) << 4));
1512 break;
1513 
1514 case 4: /*SRLI*/
1515 gen_window_check2(dc, RRR_R, RRR_T);
1516 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1517 break;
1518 
1519 case 6: /*XSR*/
/* Exchange a[t] with special register SR. */
1520 if (gen_check_sr(dc, RSR_SR, SR_X)) {
1521 TCGv_i32 tmp = tcg_temp_new_i32();
1522 
1523 if (RSR_SR >= 64) {
1524 gen_check_privilege(dc);
1525 }
1526 gen_window_check1(dc, RRR_T);
1527 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1528 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1529 gen_wsr(dc, RSR_SR, tmp);
1530 tcg_temp_free(tmp);
1531 }
1532 break;
1533 
1534 /*
1535 * Note: 64 bit ops are used here solely because SAR values
1536 * have range 0..63
1537 */
/* Shift the 64-bit value v right/left by `reg` and write the low
 * 32 bits to a[r]; consumes (frees) v. Expects a TCGv_i64 `v`
 * in scope at each expansion site. */
1538 #define gen_shift_reg(cmd, reg) do { \
1539 TCGv_i64 tmp = tcg_temp_new_i64(); \
1540 tcg_gen_extu_i32_i64(tmp, reg); \
1541 tcg_gen_##cmd##_i64(v, v, tmp); \
1542 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1543 tcg_temp_free_i64(v); \
1544 tcg_temp_free_i64(tmp); \
1545 } while (0)
1546 
1547 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
1548 
1549 case 8: /*SRC*/
1550 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1551 {
/* Funnel shift: {a[s]:a[t]} >> SAR. */
1552 TCGv_i64 v = tcg_temp_new_i64();
1553 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1554 gen_shift(shr);
1555 }
1556 break;
1557 
1558 case 9: /*SRL*/
1559 gen_window_check2(dc, RRR_R, RRR_T);
/* Fast path when SAR is known to fit in 5 bits. */
1560 if (dc->sar_5bit) {
1561 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1562 } else {
1563 TCGv_i64 v = tcg_temp_new_i64();
1564 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1565 gen_shift(shr);
1566 }
1567 break;
1568 
1569 case 10: /*SLL*/
1570 gen_window_check2(dc, RRR_R, RRR_S);
1571 if (dc->sar_m32_5bit) {
1572 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1573 } else {
/* SLL shifts left by (32 - SAR) mod 64. */
1574 TCGv_i64 v = tcg_temp_new_i64();
1575 TCGv_i32 s = tcg_const_i32(32);
1576 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1577 tcg_gen_andi_i32(s, s, 0x3f);
1578 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1579 gen_shift_reg(shl, s);
1580 tcg_temp_free(s);
1581 }
1582 break;
1583 
1584 case 11: /*SRA*/
1585 gen_window_check2(dc, RRR_R, RRR_T);
1586 if (dc->sar_5bit) {
1587 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1588 } else {
1589 TCGv_i64 v = tcg_temp_new_i64();
1590 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1591 gen_shift(sar);
1592 }
1593 break;
1594 #undef gen_shift
1595 #undef gen_shift_reg
1596 
1597 case 12: /*MUL16U*/
1598 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1599 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1600 {
/* 16x16 -> 32 multiply of the zero-extended low halves. */
1601 TCGv_i32 v1 = tcg_temp_new_i32();
1602 TCGv_i32 v2 = tcg_temp_new_i32();
1603 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1604 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1605 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1606 tcg_temp_free(v2);
1607 tcg_temp_free(v1);
1608 }
1609 break;
1610 
1611 case 13: /*MUL16S*/
1612 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1613 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1614 {
/* Same as MUL16U but sign-extended operands. */
1615 TCGv_i32 v1 = tcg_temp_new_i32();
1616 TCGv_i32 v2 = tcg_temp_new_i32();
1617 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1618 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1619 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1620 tcg_temp_free(v2);
1621 tcg_temp_free(v1);
1622 }
1623 break;
1624 
1625 default: /*reserved*/
1626 RESERVED();
1627 break;
1628 }
1629 break;
1630
/* RST2: boolean-register logic, 32-bit multiply (low/high), and the
 * divide/remainder group. OP2 >= 12 are the divide ops, which first
 * emit a runtime divide-by-zero check. */
1631 case 2: /*RST2*/
1632 if (OP2 >= 8) {
1633 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1634 }
1635 
1636 if (OP2 >= 12) {
1637 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1638 int label = gen_new_label();
1639 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1640 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1641 gen_set_label(label);
1642 }
1643 
1644 switch (OP2) {
/* Apply `fn` to boolean bits BR[s] and BR[t], storing into BR[r]. */
1645 #define BOOLEAN_LOGIC(fn, r, s, t) \
1646 do { \
1647 HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1648 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1649 TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1650 \
1651 tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1652 tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1653 tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1654 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1655 tcg_temp_free(tmp1); \
1656 tcg_temp_free(tmp2); \
1657 } while (0)
1658 
1659 case 0: /*ANDBp*/
1660 BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1661 break;
1662 
1663 case 1: /*ANDBCp*/
1664 BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1665 break;
1666 
1667 case 2: /*ORBp*/
1668 BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1669 break;
1670 
1671 case 3: /*ORBCp*/
1672 BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1673 break;
1674 
1675 case 4: /*XORBp*/
1676 BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1677 break;
1678 
1679 #undef BOOLEAN_LOGIC
1680 
1681 case 8: /*MULLi*/
1682 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1683 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1684 break;
1685 
1686 case 10: /*MULUHi*/
1687 case 11: /*MULSHi*/
1688 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1689 {
/* Only the high half of the 64-bit product is kept. */
1690 TCGv lo = tcg_temp_new();
1691 
1692 if (OP2 == 10) {
1693 tcg_gen_mulu2_i32(lo, cpu_R[RRR_R],
1694 cpu_R[RRR_S], cpu_R[RRR_T]);
1695 } else {
1696 tcg_gen_muls2_i32(lo, cpu_R[RRR_R],
1697 cpu_R[RRR_S], cpu_R[RRR_T]);
1698 }
1699 tcg_temp_free(lo);
1700 }
1701 break;
1702 
1703 case 12: /*QUOUi*/
1704 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1705 break;
1706 
1707 case 13: /*QUOSi*/
1708 case 15: /*REMSi*/
1709 {
/* Guard the INT_MIN / -1 overflow case explicitly: quotient is
 * defined as 0x80000000 and remainder as 0. */
1710 int label1 = gen_new_label();
1711 int label2 = gen_new_label();
1712 
1713 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1714 label1);
1715 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1716 label1);
1717 tcg_gen_movi_i32(cpu_R[RRR_R],
1718 OP2 == 13 ? 0x80000000 : 0);
1719 tcg_gen_br(label2);
1720 gen_set_label(label1);
1721 if (OP2 == 13) {
1722 tcg_gen_div_i32(cpu_R[RRR_R],
1723 cpu_R[RRR_S], cpu_R[RRR_T]);
1724 } else {
1725 tcg_gen_rem_i32(cpu_R[RRR_R],
1726 cpu_R[RRR_S], cpu_R[RRR_T]);
1727 }
1728 gen_set_label(label2);
1729 }
1730 break;
1731 
1732 case 14: /*REMUi*/
1733 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1734 break;
1735 
1736 default: /*reserved*/
1737 RESERVED();
1738 break;
1739 }
1740 break;
1741
/* RST3: special-register moves (RSR/WSR), sign-extension/clamp,
 * min/max, conditional moves and user-register access. */
1742 case 3: /*RST3*/
1743 switch (OP2) {
1744 case 0: /*RSR*/
1745 if (gen_check_sr(dc, RSR_SR, SR_R)) {
/* SRs >= 64 are privileged. */
1746 if (RSR_SR >= 64) {
1747 gen_check_privilege(dc);
1748 }
1749 gen_window_check1(dc, RRR_T);
1750 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1751 }
1752 break;
1753 
1754 case 1: /*WSR*/
1755 if (gen_check_sr(dc, RSR_SR, SR_W)) {
1756 if (RSR_SR >= 64) {
1757 gen_check_privilege(dc);
1758 }
1759 gen_window_check1(dc, RRR_T);
1760 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1761 }
1762 break;
1763 
1764 case 2: /*SEXTu*/
1765 HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1766 gen_window_check2(dc, RRR_R, RRR_S);
1767 {
/* Sign-extend from bit (7 + t); use the dedicated 8/16-bit
 * extension ops when they apply. */
1768 int shift = 24 - RRR_T;
1769 
1770 if (shift == 24) {
1771 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1772 } else if (shift == 16) {
1773 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1774 } else {
1775 TCGv_i32 tmp = tcg_temp_new_i32();
1776 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1777 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1778 tcg_temp_free(tmp);
1779 }
1780 }
1781 break;
1782 
1783 case 3: /*CLAMPSu*/
1784 HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1785 gen_window_check2(dc, RRR_R, RRR_S);
1786 {
/* Clamp a[s] into the signed (t+8)-bit range: tmp2 is nonzero
 * iff the value doesn't fit, tmp1 is the saturated value with
 * the sign of a[s]. */
1787 TCGv_i32 tmp1 = tcg_temp_new_i32();
1788 TCGv_i32 tmp2 = tcg_temp_new_i32();
1789 TCGv_i32 zero = tcg_const_i32(0);
1790 
1791 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1792 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1793 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1794 
1795 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1796 tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T));
1797 
1798 tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero,
1799 cpu_R[RRR_S], tmp1);
1800 tcg_temp_free(tmp1);
1801 tcg_temp_free(tmp2);
1802 tcg_temp_free(zero);
1803 }
1804 break;
1805 
1806 case 4: /*MINu*/
1807 case 5: /*MAXu*/
1808 case 6: /*MINUu*/
1809 case 7: /*MAXUu*/
1810 HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1811 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1812 {
/* Condition table indexed by OP2 - 4: MIN, MAX, MINU, MAXU. */
1813 static const TCGCond cond[] = {
1814 TCG_COND_LE,
1815 TCG_COND_GE,
1816 TCG_COND_LEU,
1817 TCG_COND_GEU
1818 };
1819 tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R],
1820 cpu_R[RRR_S], cpu_R[RRR_T],
1821 cpu_R[RRR_S], cpu_R[RRR_T]);
1822 }
1823 break;
1824 
1825 case 8: /*MOVEQZ*/
1826 case 9: /*MOVNEZ*/
1827 case 10: /*MOVLTZ*/
1828 case 11: /*MOVGEZ*/
1829 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1830 {
/* r = cond(a[t], 0) ? a[s] : r (conditional move vs zero). */
1831 static const TCGCond cond[] = {
1832 TCG_COND_EQ,
1833 TCG_COND_NE,
1834 TCG_COND_LT,
1835 TCG_COND_GE,
1836 };
1837 TCGv_i32 zero = tcg_const_i32(0);
1838 
1839 tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R],
1840 cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]);
1841 tcg_temp_free(zero);
1842 }
1843 break;
1844 
1845 case 12: /*MOVFp*/
1846 case 13: /*MOVTp*/
1847 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1848 gen_window_check2(dc, RRR_R, RRR_S);
1849 {
/* Conditional move on boolean bit BR[t]: MOVT moves when the
 * bit is set (OP2 odd), MOVF when clear. */
1850 TCGv_i32 zero = tcg_const_i32(0);
1851 TCGv_i32 tmp = tcg_temp_new_i32();
1852 
1853 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1854 tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
1855 cpu_R[RRR_R], tmp, zero,
1856 cpu_R[RRR_S], cpu_R[RRR_R]);
1857 
1858 tcg_temp_free(tmp);
1859 tcg_temp_free(zero);
1860 }
1861 break;
1862 
1863 case 14: /*RUR*/
1864 gen_window_check1(dc, RRR_R);
1865 {
/* User-register number is packed into the s and t fields. */
1866 int st = (RRR_S << 4) + RRR_T;
1867 if (uregnames[st].name) {
1868 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1869 } else {
1870 qemu_log("RUR %d not implemented, ", st);
1871 TBD();
1872 }
1873 }
1874 break;
1875 
1876 case 15: /*WUR*/
1877 gen_window_check1(dc, RRR_T);
1878 if (uregnames[RSR_SR].name) {
1879 gen_wur(RSR_SR, cpu_R[RRR_T]);
1880 } else {
1881 qemu_log("WUR %d not implemented, ", RSR_SR);
1882 TBD();
1883 }
1884 break;
1885 
1886 }
1887 break;
1888
/* EXTUI: extract an unsigned bit field — shift right by the 5-bit
 * immediate (low bit in OP1), then mask to (OP2 + 1) bits. */
1889 case 4: /*EXTUI*/
1890 case 5:
1891 gen_window_check2(dc, RRR_R, RRR_T);
1892 {
1893 int shiftimm = RRR_S | ((OP1 & 1) << 4);
1894 int maskimm = (1 << (OP2 + 1)) - 1;
1895 
1896 TCGv_i32 tmp = tcg_temp_new_i32();
1897 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1898 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1899 tcg_temp_free(tmp);
1900 }
1901 break;
1902 
/* Customer-defined opcode spaces — not implemented here. */
1903 case 6: /*CUST0*/
1904 RESERVED();
1905 break;
1906 
1907 case 7: /*CUST1*/
1908 RESERVED();
1909 break;
1910
/* LSCX: FP register indexed load/store (optionally with address
 * update); LSC4: window-underflow access L32E/S32E using the
 * privileged ring. */
1911 case 8: /*LSCXp*/
1912 switch (OP2) {
1913 case 0: /*LSXf*/
1914 case 1: /*LSXUf*/
1915 case 4: /*SSXf*/
1916 case 5: /*SSXUf*/
1917 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1918 gen_window_check2(dc, RRR_S, RRR_T);
1919 gen_check_cpenable(dc, 0);
1920 {
1921 TCGv_i32 addr = tcg_temp_new_i32();
1922 tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]);
1923 gen_load_store_alignment(dc, 2, addr, false);
/* OP2 bit 2 selects store vs load; bit 0 selects the
 * address-update ("U") form that writes addr back to a[s]. */
1924 if (OP2 & 0x4) {
1925 tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring);
1926 } else {
1927 tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring);
1928 }
1929 if (OP2 & 0x1) {
1930 tcg_gen_mov_i32(cpu_R[RRR_S], addr);
1931 }
1932 tcg_temp_free(addr);
1933 }
1934 break;
1935 
1936 default: /*reserved*/
1937 RESERVED();
1938 break;
1939 }
1940 break;
1941 
1942 case 9: /*LSC4*/
1943 gen_window_check2(dc, RRR_S, RRR_T);
1944 switch (OP2) {
1945 case 0: /*L32E*/
1946 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1947 gen_check_privilege(dc);
1948 {
/* Offset is the negative range -64..-4 encoded in r. */
1949 TCGv_i32 addr = tcg_temp_new_i32();
1950 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1951 (0xffffffc0 | (RRR_R << 2)));
1952 tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1953 tcg_temp_free(addr);
1954 }
1955 break;
1956 
1957 case 4: /*S32E*/
1958 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1959 gen_check_privilege(dc);
1960 {
1961 TCGv_i32 addr = tcg_temp_new_i32();
1962 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1963 (0xffffffc0 | (RRR_R << 2)));
1964 tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1965 tcg_temp_free(addr);
1966 }
1967 break;
1968 
1969 default:
1970 RESERVED();
1971 break;
1972 }
1973 break;
1974
/* FP0: floating-point coprocessor arithmetic, float<->int conversion
 * and single-operand ops. Every op checks CPENABLE bit 0 first. */
1975 case 10: /*FP0*/
1976 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1977 switch (OP2) {
1978 case 0: /*ADD.Sf*/
1979 gen_check_cpenable(dc, 0);
1980 gen_helper_add_s(cpu_FR[RRR_R], cpu_env,
1981 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1982 break;
1983 
1984 case 1: /*SUB.Sf*/
1985 gen_check_cpenable(dc, 0);
1986 gen_helper_sub_s(cpu_FR[RRR_R], cpu_env,
1987 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1988 break;
1989 
1990 case 2: /*MUL.Sf*/
1991 gen_check_cpenable(dc, 0);
1992 gen_helper_mul_s(cpu_FR[RRR_R], cpu_env,
1993 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1994 break;
1995 
1996 case 4: /*MADD.Sf*/
/* Fused form: f[r] = f[r] + f[s] * f[t] (accumulator first). */
1997 gen_check_cpenable(dc, 0);
1998 gen_helper_madd_s(cpu_FR[RRR_R], cpu_env,
1999 cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]);
2000 break;
2001 
2002 case 5: /*MSUB.Sf*/
2003 gen_check_cpenable(dc, 0);
2004 gen_helper_msub_s(cpu_FR[RRR_R], cpu_env,
2005 cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]);
2006 break;
2007 
2008 case 8: /*ROUND.Sf*/
2009 case 9: /*TRUNC.Sf*/
2010 case 10: /*FLOOR.Sf*/
2011 case 11: /*CEIL.Sf*/
2012 case 14: /*UTRUNC.Sf*/
2013 gen_window_check1(dc, RRR_R);
2014 gen_check_cpenable(dc, 0);
2015 {
/* Rounding mode table indexed by OP2 & 7; t scales the result
 * by 2^t before conversion. */
2016 static const unsigned rounding_mode_const[] = {
2017 float_round_nearest_even,
2018 float_round_to_zero,
2019 float_round_down,
2020 float_round_up,
2021 [6] = float_round_to_zero,
2022 };
2023 TCGv_i32 rounding_mode = tcg_const_i32(
2024 rounding_mode_const[OP2 & 7]);
2025 TCGv_i32 scale = tcg_const_i32(RRR_T);
2026 
2027 if (OP2 == 14) {
2028 gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S],
2029 rounding_mode, scale);
2030 } else {
2031 gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S],
2032 rounding_mode, scale);
2033 }
2034 
2035 tcg_temp_free(rounding_mode);
2036 tcg_temp_free(scale);
2037 }
2038 break;
2039 
2040 case 12: /*FLOAT.Sf*/
2041 case 13: /*UFLOAT.Sf*/
2042 gen_window_check1(dc, RRR_S);
2043 gen_check_cpenable(dc, 0);
2044 {
/* int -> float with scaling by 2^-t. */
2045 TCGv_i32 scale = tcg_const_i32(-RRR_T);
2046 
2047 if (OP2 == 13) {
2048 gen_helper_uitof(cpu_FR[RRR_R], cpu_env,
2049 cpu_R[RRR_S], scale);
2050 } else {
2051 gen_helper_itof(cpu_FR[RRR_R], cpu_env,
2052 cpu_R[RRR_S], scale);
2053 }
2054 tcg_temp_free(scale);
2055 }
2056 break;
2057 
2058 case 15: /*FP1OP*/
2059 switch (RRR_T) {
2060 case 0: /*MOV.Sf*/
2061 gen_check_cpenable(dc, 0);
2062 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2063 break;
2064 
2065 case 1: /*ABS.Sf*/
2066 gen_check_cpenable(dc, 0);
2067 gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2068 break;
2069 
2070 case 4: /*RFRf*/
/* Raw bit move FP reg -> address reg. */
2071 gen_window_check1(dc, RRR_R);
2072 gen_check_cpenable(dc, 0);
2073 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]);
2074 break;
2075 
2076 case 5: /*WFRf*/
2077 gen_window_check1(dc, RRR_S);
2078 gen_check_cpenable(dc, 0);
2079 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]);
2080 break;
2081 
2082 case 6: /*NEG.Sf*/
2083 gen_check_cpenable(dc, 0);
2084 gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2085 break;
2086 
2087 default: /*reserved*/
2088 RESERVED();
2089 break;
2090 }
2091 break;
2092 
2093 default: /*reserved*/
2094 RESERVED();
2095 break;
2096 }
2097 break;
2098
/* FP1: floating-point compares (result goes into boolean bit BR[r])
 * and FP conditional moves. Closes the QRST (OP0 == 0) group. */
2099 case 11: /*FP1*/
2100 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
2101 
/* Compare f[a] with f[b] using helper `rel` and set boolean bit br. */
2102 #define gen_compare(rel, br, a, b) \
2103 do { \
2104 TCGv_i32 bit = tcg_const_i32(1 << br); \
2105 \
2106 gen_check_cpenable(dc, 0); \
2107 gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \
2108 tcg_temp_free(bit); \
2109 } while (0)
2110 
2111 switch (OP2) {
2112 case 1: /*UN.Sf*/
2113 gen_compare(un_s, RRR_R, RRR_S, RRR_T);
2114 break;
2115 
2116 case 2: /*OEQ.Sf*/
2117 gen_compare(oeq_s, RRR_R, RRR_S, RRR_T);
2118 break;
2119 
2120 case 3: /*UEQ.Sf*/
2121 gen_compare(ueq_s, RRR_R, RRR_S, RRR_T);
2122 break;
2123 
2124 case 4: /*OLT.Sf*/
2125 gen_compare(olt_s, RRR_R, RRR_S, RRR_T);
2126 break;
2127 
2128 case 5: /*ULT.Sf*/
2129 gen_compare(ult_s, RRR_R, RRR_S, RRR_T);
2130 break;
2131 
2132 case 6: /*OLE.Sf*/
2133 gen_compare(ole_s, RRR_R, RRR_S, RRR_T);
2134 break;
2135 
2136 case 7: /*ULE.Sf*/
2137 gen_compare(ule_s, RRR_R, RRR_S, RRR_T);
2138 break;
2139 
2140 #undef gen_compare
2141 
2142 case 8: /*MOVEQZ.Sf*/
2143 case 9: /*MOVNEZ.Sf*/
2144 case 10: /*MOVLTZ.Sf*/
2145 case 11: /*MOVGEZ.Sf*/
2146 gen_window_check1(dc, RRR_T);
2147 gen_check_cpenable(dc, 0);
2148 {
/* f[r] = cond(a[t], 0) ? f[s] : f[r]. */
2149 static const TCGCond cond[] = {
2150 TCG_COND_EQ,
2151 TCG_COND_NE,
2152 TCG_COND_LT,
2153 TCG_COND_GE,
2154 };
2155 TCGv_i32 zero = tcg_const_i32(0);
2156 
2157 tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R],
2158 cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]);
2159 tcg_temp_free(zero);
2160 }
2161 break;
2162 
2163 case 12: /*MOVF.Sf*/
2164 case 13: /*MOVT.Sf*/
2165 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2166 gen_check_cpenable(dc, 0);
2167 {
/* FP conditional move on boolean bit BR[t]. */
2168 TCGv_i32 zero = tcg_const_i32(0);
2169 TCGv_i32 tmp = tcg_temp_new_i32();
2170 
2171 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
2172 tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
2173 cpu_FR[RRR_R], tmp, zero,
2174 cpu_FR[RRR_S], cpu_FR[RRR_R]);
2175 
2176 tcg_temp_free(tmp);
2177 tcg_temp_free(zero);
2178 }
2179 break;
2180 
2181 default: /*reserved*/
2182 RESERVED();
2183 break;
2184 }
2185 break;
2186 
2187 default: /*reserved*/
2188 RESERVED();
2189 break;
2190 }
2191 break;
2192
/* L32R: PC-relative (or LITBASE-relative) 32-bit literal load.
 * The 16-bit immediate is a negative word offset from either
 * (pc + 3) & ~3 or the literal base, depending on TB flags. */
2193 case 1: /*L32R*/
2194 gen_window_check1(dc, RRR_T);
2195 {
2196 TCGv_i32 tmp = tcg_const_i32(
2197 ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
2198 0 : ((dc->pc + 3) & ~3)) +
2199 (0xfffc0000 | (RI16_IMM16 << 2)));
2200 
2201 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
2202 tcg_gen_add_i32(tmp, tmp, dc->litbase);
2203 }
2204 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
2205 tcg_temp_free(tmp);
2206 }
2207 break;
2208
/* LSAI: load/store with 8-bit immediate offset, cache-maintenance
 * ops, MOVI, L32AI and ADDI. The gen_load_store macro scales the
 * immediate by the access size and enforces alignment for sizes > 1. */
2209 case 2: /*LSAI*/
2210 #define gen_load_store(type, shift) do { \
2211 TCGv_i32 addr = tcg_temp_new_i32(); \
2212 gen_window_check2(dc, RRI8_S, RRI8_T); \
2213 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
2214 if (shift) { \
2215 gen_load_store_alignment(dc, shift, addr, false); \
2216 } \
2217 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2218 tcg_temp_free(addr); \
2219 } while (0)
2220 
2221 switch (RRI8_R) {
2222 case 0: /*L8UI*/
2223 gen_load_store(ld8u, 0);
2224 break;
2225 
2226 case 1: /*L16UI*/
2227 gen_load_store(ld16u, 1);
2228 break;
2229 
2230 case 2: /*L32I*/
2231 gen_load_store(ld32u, 2);
2232 break;
2233 
2234 case 4: /*S8I*/
2235 gen_load_store(st8, 0);
2236 break;
2237 
2238 case 5: /*S16I*/
2239 gen_load_store(st16, 1);
2240 break;
2241 
2242 case 6: /*S32I*/
2243 gen_load_store(st32, 2);
2244 break;
2245 
/* Model a "hit" dcache op as a byte load from the target address so
 * that any TLB/permission exception is raised; the value is discarded. */
2246 #define gen_dcache_hit_test(w, shift) do { \
2247 TCGv_i32 addr = tcg_temp_new_i32(); \
2248 TCGv_i32 res = tcg_temp_new_i32(); \
2249 gen_window_check1(dc, RRI##w##_S); \
2250 tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
2251 RRI##w##_IMM##w << shift); \
2252 tcg_gen_qemu_ld8u(res, addr, dc->cring); \
2253 tcg_temp_free(addr); \
2254 tcg_temp_free(res); \
2255 } while (0)
2256 
2257 #define gen_dcache_hit_test4() gen_dcache_hit_test(4, 4)
2258 #define gen_dcache_hit_test8() gen_dcache_hit_test(8, 2)
2259 
2260 case 7: /*CACHEc*/
2261 if (RRI8_T < 8) {
2262 HAS_OPTION(XTENSA_OPTION_DCACHE);
2263 }
2264 
/* Prefetch variants only need a valid register window; index/hit
 * invalidate variants additionally need privilege and/or a probe. */
2265 switch (RRI8_T) {
2266 case 0: /*DPFRc*/
2267 gen_window_check1(dc, RRI8_S);
2268 break;
2269 
2270 case 1: /*DPFWc*/
2271 gen_window_check1(dc, RRI8_S);
2272 break;
2273 
2274 case 2: /*DPFROc*/
2275 gen_window_check1(dc, RRI8_S);
2276 break;
2277 
2278 case 3: /*DPFWOc*/
2279 gen_window_check1(dc, RRI8_S);
2280 break;
2281 
2282 case 4: /*DHWBc*/
2283 gen_dcache_hit_test8();
2284 break;
2285 
2286 case 5: /*DHWBIc*/
2287 gen_dcache_hit_test8();
2288 break;
2289 
2290 case 6: /*DHIc*/
2291 gen_check_privilege(dc);
2292 gen_dcache_hit_test8();
2293 break;
2294 
2295 case 7: /*DIIc*/
2296 gen_check_privilege(dc);
2297 gen_window_check1(dc, RRI8_S);
2298 break;
2299 
2300 case 8: /*DCEc*/
2301 switch (OP1) {
2302 case 0: /*DPFLl*/
2303 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2304 gen_check_privilege(dc);
2305 gen_dcache_hit_test4();
2306 break;
2307 
2308 case 2: /*DHUl*/
2309 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2310 gen_check_privilege(dc);
2311 gen_dcache_hit_test4();
2312 break;
2313 
2314 case 3: /*DIUl*/
2315 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2316 gen_check_privilege(dc);
2317 gen_window_check1(dc, RRI4_S);
2318 break;
2319 
2320 case 4: /*DIWBc*/
2321 HAS_OPTION(XTENSA_OPTION_DCACHE);
2322 gen_check_privilege(dc);
2323 gen_window_check1(dc, RRI4_S);
2324 break;
2325 
2326 case 5: /*DIWBIc*/
2327 HAS_OPTION(XTENSA_OPTION_DCACHE);
2328 gen_check_privilege(dc);
2329 gen_window_check1(dc, RRI4_S);
2330 break;
2331 
2332 default: /*reserved*/
2333 RESERVED();
2334 break;
2335 
2336 }
2337 break;
2338 
2339 #undef gen_dcache_hit_test
2340 #undef gen_dcache_hit_test4
2341 #undef gen_dcache_hit_test8
2342 
/* Icache hit test goes through the itlb_hit_test helper, which needs
 * cpu_pc up to date in case it raises an exception. */
2343 #define gen_icache_hit_test(w, shift) do { \
2344 TCGv_i32 addr = tcg_temp_new_i32(); \
2345 gen_window_check1(dc, RRI##w##_S); \
2346 tcg_gen_movi_i32(cpu_pc, dc->pc); \
2347 tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
2348 RRI##w##_IMM##w << shift); \
2349 gen_helper_itlb_hit_test(cpu_env, addr); \
2350 tcg_temp_free(addr); \
2351 } while (0)
2352 
2353 #define gen_icache_hit_test4() gen_icache_hit_test(4, 4)
2354 #define gen_icache_hit_test8() gen_icache_hit_test(8, 2)
2355 
2356 case 12: /*IPFc*/
2357 HAS_OPTION(XTENSA_OPTION_ICACHE);
2358 gen_window_check1(dc, RRI8_S);
2359 break;
2360 
2361 case 13: /*ICEc*/
2362 switch (OP1) {
2363 case 0: /*IPFLl*/
2364 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2365 gen_check_privilege(dc);
2366 gen_icache_hit_test4();
2367 break;
2368 
2369 case 2: /*IHUl*/
2370 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2371 gen_check_privilege(dc);
2372 gen_icache_hit_test4();
2373 break;
2374 
2375 case 3: /*IIUl*/
2376 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2377 gen_check_privilege(dc);
2378 gen_window_check1(dc, RRI4_S);
2379 break;
2380 
2381 default: /*reserved*/
2382 RESERVED();
2383 break;
2384 }
2385 break;
2386 
2387 case 14: /*IHIc*/
2388 HAS_OPTION(XTENSA_OPTION_ICACHE);
2389 gen_icache_hit_test8();
2390 break;
2391 
2392 case 15: /*IIIc*/
2393 HAS_OPTION(XTENSA_OPTION_ICACHE);
2394 gen_check_privilege(dc);
2395 gen_window_check1(dc, RRI8_S);
2396 break;
2397 
2398 default: /*reserved*/
2399 RESERVED();
2400 break;
2401 }
2402 break;
2403 
2404 #undef gen_icache_hit_test
2405 #undef gen_icache_hit_test4
2406 #undef gen_icache_hit_test8
2407 
2408 case 9: /*L16SI*/
2409 gen_load_store(ld16s, 1);
2410 break;
2411 #undef gen_load_store
2412 
2413 case 10: /*MOVI*/
2414 gen_window_check1(dc, RRI8_T);
/* 12-bit signed immediate assembled from imm8 and the s field. */
2415 tcg_gen_movi_i32(cpu_R[RRI8_T],
2416 RRI8_IMM8 | (RRI8_S << 8) |
2417 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
2418 break;
2419 
/* Like gen_load_store but always enforces alignment in "no hardware
 * alignment" mode; uses a local temp because the alignment check may
 * branch. */
2420 #define gen_load_store_no_hw_align(type) do { \
2421 TCGv_i32 addr = tcg_temp_local_new_i32(); \
2422 gen_window_check2(dc, RRI8_S, RRI8_T); \
2423 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
2424 gen_load_store_alignment(dc, 2, addr, true); \
2425 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2426 tcg_temp_free(addr); \
2427 } while (0)
2428 
2429 case 11: /*L32AIy*/
2430 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2431 gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
2432 break;
2433 
2434 case 12: /*ADDI*/
2435 gen_window_check2(dc, RRI8_S, RRI8_T);
2436 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
2437 break;
2438
2439 case 13: /*ADDMI*/
2440 gen_window_check2(dc, RRI8_S, RRI8_T);
2441 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
2442 break;
2443
2444 case 14: /*S32C1Iy*/
2445 HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
2446 gen_window_check2(dc, RRI8_S, RRI8_T);
2447 {
2448 int label = gen_new_label();
2449 TCGv_i32 tmp = tcg_temp_local_new_i32();
2450 TCGv_i32 addr = tcg_temp_local_new_i32();
2451 TCGv_i32 tpc;
2452
2453 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
2454 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2455 gen_load_store_alignment(dc, 2, addr, true);
2456
2457 gen_advance_ccount(dc);
2458 tpc = tcg_const_i32(dc->pc);
2459 gen_helper_check_atomctl(cpu_env, tpc, addr);
2460 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
2461 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
2462 cpu_SR[SCOMPARE1], label);
2463
2464 tcg_gen_qemu_st32(tmp, addr, dc->cring);
2465
2466 gen_set_label(label);
2467 tcg_temp_free(tpc);
2468 tcg_temp_free(addr);
2469 tcg_temp_free(tmp);
2470 }
2471 break;
2472
2473 case 15: /*S32RIy*/
2474 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2475 gen_load_store_no_hw_align(st32); /*TODO release?*/
2476 break;
2477 #undef gen_load_store_no_hw_align
2478
2479 default: /*reserved*/
2480 RESERVED();
2481 break;
2482 }
2483 break;
2484
2485 case 3: /*LSCIp*/
2486 switch (RRI8_R) {
2487 case 0: /*LSIf*/
2488 case 4: /*SSIf*/
2489 case 8: /*LSIUf*/
2490 case 12: /*SSIUf*/
2491 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
2492 gen_window_check1(dc, RRI8_S);
2493 gen_check_cpenable(dc, 0);
2494 {
2495 TCGv_i32 addr = tcg_temp_new_i32();
2496 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2497 gen_load_store_alignment(dc, 2, addr, false);
2498 if (RRI8_R & 0x4) {
2499 tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring);
2500 } else {
2501 tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring);
2502 }
2503 if (RRI8_R & 0x8) {
2504 tcg_gen_mov_i32(cpu_R[RRI8_S], addr);
2505 }
2506 tcg_temp_free(addr);
2507 }
2508 break;
2509
2510 default: /*reserved*/
2511 RESERVED();
2512 break;
2513 }
2514 break;
2515
2516 case 4: /*MAC16d*/
2517 HAS_OPTION(XTENSA_OPTION_MAC16);
2518 {
2519 enum {
2520 MAC16_UMUL = 0x0,
2521 MAC16_MUL = 0x4,
2522 MAC16_MULA = 0x8,
2523 MAC16_MULS = 0xc,
2524 MAC16_NONE = 0xf,
2525 } op = OP1 & 0xc;
2526 bool is_m1_sr = (OP2 & 0x3) == 2;
2527 bool is_m2_sr = (OP2 & 0xc) == 0;
2528 uint32_t ld_offset = 0;
2529
2530 if (OP2 > 9) {
2531 RESERVED();
2532 }
2533
2534 switch (OP2 & 2) {
2535 case 0: /*MACI?/MACC?*/
2536 is_m1_sr = true;
2537 ld_offset = (OP2 & 1) ? -4 : 4;
2538
2539 if (OP2 >= 8) { /*MACI/MACC*/
2540 if (OP1 == 0) { /*LDINC/LDDEC*/
2541 op = MAC16_NONE;
2542 } else {
2543 RESERVED();
2544 }
2545 } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
2546 RESERVED();
2547 }
2548 break;
2549
2550 case 2: /*MACD?/MACA?*/
2551 if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2552 RESERVED();
2553 }
2554 break;
2555 }
2556
2557 if (op != MAC16_NONE) {
2558 if (!is_m1_sr) {
2559 gen_window_check1(dc, RRR_S);
2560 }
2561 if (!is_m2_sr) {
2562 gen_window_check1(dc, RRR_T);
2563 }
2564 }
2565
2566 {
2567 TCGv_i32 vaddr = tcg_temp_new_i32();
2568 TCGv_i32 mem32 = tcg_temp_new_i32();
2569
2570 if (ld_offset) {
2571 gen_window_check1(dc, RRR_S);
2572 tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2573 gen_load_store_alignment(dc, 2, vaddr, false);
2574 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2575 }
2576 if (op != MAC16_NONE) {
2577 TCGv_i32 m1 = gen_mac16_m(
2578 is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2579 OP1 & 1, op == MAC16_UMUL);
2580 TCGv_i32 m2 = gen_mac16_m(
2581 is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2582 OP1 & 2, op == MAC16_UMUL);
2583
2584 if (op == MAC16_MUL || op == MAC16_UMUL) {
2585 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2586 if (op == MAC16_UMUL) {
2587 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2588 } else {
2589 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2590 }
2591 } else {
2592 TCGv_i32 lo = tcg_temp_new_i32();
2593 TCGv_i32 hi = tcg_temp_new_i32();
2594
2595 tcg_gen_mul_i32(lo, m1, m2);
2596 tcg_gen_sari_i32(hi, lo, 31);
2597 if (op == MAC16_MULA) {
2598 tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
2599 cpu_SR[ACCLO], cpu_SR[ACCHI],
2600 lo, hi);
2601 } else {
2602 tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
2603 cpu_SR[ACCLO], cpu_SR[ACCHI],
2604 lo, hi);
2605 }
2606 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2607
2608 tcg_temp_free_i32(lo);
2609 tcg_temp_free_i32(hi);
2610 }
2611 tcg_temp_free(m1);
2612 tcg_temp_free(m2);
2613 }
2614 if (ld_offset) {
2615 tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2616 tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2617 }
2618 tcg_temp_free(vaddr);
2619 tcg_temp_free(mem32);
2620 }
2621 }
2622 break;
2623
2624 case 5: /*CALLN*/
2625 switch (CALL_N) {
2626 case 0: /*CALL0*/
2627 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2628 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2629 break;
2630
2631 case 1: /*CALL4w*/
2632 case 2: /*CALL8w*/
2633 case 3: /*CALL12w*/
2634 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2635 gen_window_check1(dc, CALL_N << 2);
2636 gen_callwi(dc, CALL_N,
2637 (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2638 break;
2639 }
2640 break;
2641
2642 case 6: /*SI*/
2643 switch (CALL_N) {
2644 case 0: /*J*/
2645 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2646 break;
2647
2648 case 1: /*BZ*/
2649 gen_window_check1(dc, BRI12_S);
2650 {
2651 static const TCGCond cond[] = {
2652 TCG_COND_EQ, /*BEQZ*/
2653 TCG_COND_NE, /*BNEZ*/
2654 TCG_COND_LT, /*BLTZ*/
2655 TCG_COND_GE, /*BGEZ*/
2656 };
2657
2658 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2659 4 + BRI12_IMM12_SE);
2660 }
2661 break;
2662
2663 case 2: /*BI0*/
2664 gen_window_check1(dc, BRI8_S);
2665 {
2666 static const TCGCond cond[] = {
2667 TCG_COND_EQ, /*BEQI*/
2668 TCG_COND_NE, /*BNEI*/
2669 TCG_COND_LT, /*BLTI*/
2670 TCG_COND_GE, /*BGEI*/
2671 };
2672
2673 gen_brcondi(dc, cond[BRI8_M & 3],
2674 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2675 }
2676 break;
2677
2678 case 3: /*BI1*/
2679 switch (BRI8_M) {
2680 case 0: /*ENTRYw*/
2681 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2682 {
2683 TCGv_i32 pc = tcg_const_i32(dc->pc);
2684 TCGv_i32 s = tcg_const_i32(BRI12_S);
2685 TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2686 gen_advance_ccount(dc);
2687 gen_helper_entry(cpu_env, pc, s, imm);
2688 tcg_temp_free(imm);
2689 tcg_temp_free(s);
2690 tcg_temp_free(pc);
2691 reset_used_window(dc);
2692 }
2693 break;
2694
2695 case 1: /*B1*/
2696 switch (BRI8_R) {
2697 case 0: /*BFp*/
2698 case 1: /*BTp*/
2699 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2700 {
2701 TCGv_i32 tmp = tcg_temp_new_i32();
2702 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2703 gen_brcondi(dc,
2704 BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2705 tmp, 0, 4 + RRI8_IMM8_SE);
2706 tcg_temp_free(tmp);
2707 }
2708 break;
2709
2710 case 8: /*LOOP*/
2711 case 9: /*LOOPNEZ*/
2712 case 10: /*LOOPGTZ*/
2713 HAS_OPTION(XTENSA_OPTION_LOOP);
2714 gen_window_check1(dc, RRI8_S);
2715 {
2716 uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2717 TCGv_i32 tmp = tcg_const_i32(lend);
2718
2719 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2720 tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2721 gen_helper_wsr_lend(cpu_env, tmp);
2722 tcg_temp_free(tmp);
2723
2724 if (BRI8_R > 8) {
2725 int label = gen_new_label();
2726 tcg_gen_brcondi_i32(
2727 BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2728 cpu_R[RRI8_S], 0, label);
2729 gen_jumpi(dc, lend, 1);
2730 gen_set_label(label);
2731 }
2732
2733 gen_jumpi(dc, dc->next_pc, 0);
2734 }
2735 break;
2736
2737 default: /*reserved*/
2738 RESERVED();
2739 break;
2740
2741 }
2742 break;
2743
2744 case 2: /*BLTUI*/
2745 case 3: /*BGEUI*/
2746 gen_window_check1(dc, BRI8_S);
2747 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2748 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2749 break;
2750 }
2751 break;
2752
2753 }
2754 break;
2755
2756 case 7: /*B*/
2757 {
2758 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2759
2760 switch (RRI8_R & 7) {
2761 case 0: /*BNONE*/ /*BANY*/
2762 gen_window_check2(dc, RRI8_S, RRI8_T);
2763 {
2764 TCGv_i32 tmp = tcg_temp_new_i32();
2765 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2766 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2767 tcg_temp_free(tmp);
2768 }
2769 break;
2770
2771 case 1: /*BEQ*/ /*BNE*/
2772 case 2: /*BLT*/ /*BGE*/
2773 case 3: /*BLTU*/ /*BGEU*/
2774 gen_window_check2(dc, RRI8_S, RRI8_T);
2775 {
2776 static const TCGCond cond[] = {
2777 [1] = TCG_COND_EQ,
2778 [2] = TCG_COND_LT,
2779 [3] = TCG_COND_LTU,
2780 [9] = TCG_COND_NE,
2781 [10] = TCG_COND_GE,
2782 [11] = TCG_COND_GEU,
2783 };
2784 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2785 4 + RRI8_IMM8_SE);
2786 }
2787 break;
2788
2789 case 4: /*BALL*/ /*BNALL*/
2790 gen_window_check2(dc, RRI8_S, RRI8_T);
2791 {
2792 TCGv_i32 tmp = tcg_temp_new_i32();
2793 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2794 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2795 4 + RRI8_IMM8_SE);
2796 tcg_temp_free(tmp);
2797 }
2798 break;
2799
2800 case 5: /*BBC*/ /*BBS*/
2801 gen_window_check2(dc, RRI8_S, RRI8_T);
2802 {
2803 #ifdef TARGET_WORDS_BIGENDIAN
2804 TCGv_i32 bit = tcg_const_i32(0x80000000);
2805 #else
2806 TCGv_i32 bit = tcg_const_i32(0x00000001);
2807 #endif
2808 TCGv_i32 tmp = tcg_temp_new_i32();
2809 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2810 #ifdef TARGET_WORDS_BIGENDIAN
2811 tcg_gen_shr_i32(bit, bit, tmp);
2812 #else
2813 tcg_gen_shl_i32(bit, bit, tmp);
2814 #endif
2815 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2816 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2817 tcg_temp_free(tmp);
2818 tcg_temp_free(bit);
2819 }
2820 break;
2821
2822 case 6: /*BBCI*/ /*BBSI*/
2823 case 7:
2824 gen_window_check1(dc, RRI8_S);
2825 {
2826 TCGv_i32 tmp = tcg_temp_new_i32();
2827 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2828 #ifdef TARGET_WORDS_BIGENDIAN
2829 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T));
2830 #else
2831 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T));
2832 #endif
2833 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2834 tcg_temp_free(tmp);
2835 }
2836 break;
2837
2838 }
2839 }
2840 break;
2841
2842 #define gen_narrow_load_store(type) do { \
2843 TCGv_i32 addr = tcg_temp_new_i32(); \
2844 gen_window_check2(dc, RRRN_S, RRRN_T); \
2845 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2846 gen_load_store_alignment(dc, 2, addr, false); \
2847 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2848 tcg_temp_free(addr); \
2849 } while (0)
2850
2851 case 8: /*L32I.Nn*/
2852 gen_narrow_load_store(ld32u);
2853 break;
2854
2855 case 9: /*S32I.Nn*/
2856 gen_narrow_load_store(st32);
2857 break;
2858 #undef gen_narrow_load_store
2859
2860 case 10: /*ADD.Nn*/
2861 gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2862 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2863 break;
2864
2865 case 11: /*ADDI.Nn*/
2866 gen_window_check2(dc, RRRN_R, RRRN_S);
2867 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2868 break;
2869
2870 case 12: /*ST2n*/
2871 gen_window_check1(dc, RRRN_S);
2872 if (RRRN_T < 8) { /*MOVI.Nn*/
2873 tcg_gen_movi_i32(cpu_R[RRRN_S],
2874 RRRN_R | (RRRN_T << 4) |
2875 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2876 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2877 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2878
2879 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2880 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2881 }
2882 break;
2883
2884 case 13: /*ST3n*/
2885 switch (RRRN_R) {
2886 case 0: /*MOV.Nn*/
2887 gen_window_check2(dc, RRRN_S, RRRN_T);
2888 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2889 break;
2890
2891 case 15: /*S3*/
2892 switch (RRRN_T) {
2893 case 0: /*RET.Nn*/
2894 gen_jump(dc, cpu_R[0]);
2895 break;
2896
2897 case 1: /*RETW.Nn*/
2898 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2899 {
2900 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2901 gen_advance_ccount(dc);
2902 gen_helper_retw(tmp, cpu_env, tmp);
2903 gen_jump(dc, tmp);
2904 tcg_temp_free(tmp);
2905 }
2906 break;
2907
2908 case 2: /*BREAK.Nn*/
2909 HAS_OPTION(XTENSA_OPTION_DEBUG);
2910 if (dc->debug) {
2911 gen_debug_exception(dc, DEBUGCAUSE_BN);
2912 }
2913 break;
2914
2915 case 3: /*NOP.Nn*/
2916 break;
2917
2918 case 6: /*ILL.Nn*/
2919 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2920 break;
2921
2922 default: /*reserved*/
2923 RESERVED();
2924 break;
2925 }
2926 break;
2927
2928 default: /*reserved*/
2929 RESERVED();
2930 break;
2931 }
2932 break;
2933
2934 default: /*reserved*/
2935 RESERVED();
2936 break;
2937 }
2938
2939 if (dc->is_jmp == DISAS_NEXT) {
2940 gen_check_loop_end(dc, 0);
2941 }
2942 dc->pc = dc->next_pc;
2943
2944 return;
2945
2946 invalid_opcode:
2947 qemu_log("INVALID(pc = %08x)\n", dc->pc);
2948 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2949 #undef HAS_OPTION
2950 }
2951
2952 static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
2953 {
2954 CPUState *cs = CPU(xtensa_env_get_cpu(env));
2955 CPUBreakpoint *bp;
2956
2957 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2958 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
2959 if (bp->pc == dc->pc) {
2960 tcg_gen_movi_i32(cpu_pc, dc->pc);
2961 gen_exception(dc, EXCP_DEBUG);
2962 dc->is_jmp = DISAS_UPDATE;
2963 }
2964 }
2965 }
2966 }
2967
2968 static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
2969 {
2970 unsigned i;
2971
2972 for (i = 0; i < dc->config->nibreak; ++i) {
2973 if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
2974 env->sregs[IBREAKA + i] == dc->pc) {
2975 gen_debug_exception(dc, DEBUGCAUSE_IB);
2976 break;
2977 }
2978 }
2979 }
2980
/*
 * Translate a block of guest Xtensa code starting at tb->pc into TCG ops.
 *
 * When search_pc is true we are re-translating an existing TB in order to
 * map a host PC back to a guest PC: the tcg_ctx.gen_opc_* side tables are
 * filled in, and tb->size / tb->icount are left untouched.  When false,
 * this is a normal translation and tb->size / tb->icount are recorded.
 *
 * Translation stops at the first of: a branch/exception (dc.is_jmp no
 * longer DISAS_NEXT), max_insns translated instructions, the next guest
 * page boundary, the TCG opcode buffer running out, or single-stepping.
 */
static inline
void gen_intermediate_code_internal(XtensaCPU *cpu,
        TranslationBlock *tb, bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUXtensaState *env = &cpu->env;
    DisasContext dc;
    int insn_count = 0;
    int j, lj = -1;                  /* lj: last gen_opc_* slot written */
    uint16_t *gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    int max_insns = tb->cflags & CF_COUNT_MASK;
    uint32_t pc_start = tb->pc;
    uint32_t next_page_start =
        (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    /* Seed the per-TB disassembly context from the TB flags. */
    dc.config = env->config;
    dc.singlestep_enabled = cs->singlestep_enabled;
    dc.tb = tb;
    dc.pc = pc_start;
    dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
    /* cring: effective ring for data accesses; EXCM forces ring 0 */
    dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
    dc.lbeg = env->sregs[LBEG];
    dc.lend = env->sregs[LEND];
    dc.is_jmp = DISAS_NEXT;
    dc.ccount_delta = 0;
    dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
    dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;
    dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
        XTENSA_TBFLAG_CPENABLE_SHIFT;

    init_litbase(&dc);
    init_sar_tracker(&dc);
    reset_used_window(&dc);
    if (dc.icount) {
        /* local temp: must survive brcond boundaries inside the loop */
        dc.next_icount = tcg_temp_local_new_i32();
    }

    gen_tb_start();

    /* TB flagged as starting on a debug exception: raise it up front. */
    if (tb->flags & XTENSA_TBFLAG_EXCEPTION) {
        tcg_gen_movi_i32(cpu_pc, dc.pc);
        gen_exception(&dc, EXCP_DEBUG);
    }

    do {
        check_breakpoint(env, &dc);

        if (search_pc) {
            /* Record guest PC / icount for each generated op slot,
             * zero-filling any slots skipped since the last insn. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = insn_count;
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        ++dc.ccount_delta;

        /* Last insn of an I/O-bounded TB must run with io started. */
        if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc.icount) {
            /* ICOUNT single-step: raise DEBUGCAUSE_IC when the counter
             * would wrap from -1 to 0; otherwise keep the old value. */
            int label = gen_new_label();

            tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
            tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
            tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
            if (dc.debug) {
                gen_debug_exception(&dc, DEBUGCAUSE_IC);
            }
            gen_set_label(label);
        }

        if (dc.debug) {
            gen_ibreak_check(env, &dc);
        }

        disas_xtensa_insn(env, &dc);
        ++insn_count;
        if (dc.icount) {
            tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
        }
        if (cs->singlestep_enabled) {
            /* gdb single-step: stop after every instruction. */
            tcg_gen_movi_i32(cpu_pc, dc.pc);
            gen_exception(&dc, EXCP_DEBUG);
            break;
        }
    } while (dc.is_jmp == DISAS_NEXT &&
            insn_count < max_insns &&
            dc.pc < next_page_start &&
            tcg_ctx.gen_opc_ptr < gen_opc_end);

    reset_litbase(&dc);
    reset_sar_tracker(&dc);
    if (dc.icount) {
        tcg_temp_free(dc.next_icount);
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* TB ended without an explicit jump: chain to the fall-through PC. */
    if (dc.is_jmp == DISAS_NEXT) {
        gen_jumpi(&dc, dc.pc, 0);
    }
    gen_tb_end(tb, insn_count);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Zero the tail of the instr_start table past the last insn. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        memset(tcg_ctx.gen_opc_instr_start + lj + 1, 0,
                (j - lj) * sizeof(tcg_ctx.gen_opc_instr_start[0]));
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = insn_count;
    }
}
3119
3120 void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
3121 {
3122 gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, false);
3123 }
3124
3125 void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
3126 {
3127 gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, true);
3128 }
3129
3130 void xtensa_cpu_dump_state(CPUState *cs, FILE *f,
3131 fprintf_function cpu_fprintf, int flags)
3132 {
3133 XtensaCPU *cpu = XTENSA_CPU(cs);
3134 CPUXtensaState *env = &cpu->env;
3135 int i, j;
3136
3137 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
3138
3139 for (i = j = 0; i < 256; ++i) {
3140 if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) {
3141 cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i],
3142 (j++ % 4) == 3 ? '\n' : ' ');
3143 }
3144 }
3145
3146 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
3147
3148 for (i = j = 0; i < 256; ++i) {
3149 if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) {
3150 cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i],
3151 (j++ % 4) == 3 ? '\n' : ' ');
3152 }
3153 }
3154
3155 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
3156
3157 for (i = 0; i < 16; ++i) {
3158 cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i],
3159 (i % 4) == 3 ? '\n' : ' ');
3160 }
3161
3162 cpu_fprintf(f, "\n");
3163
3164 for (i = 0; i < env->config->nareg; ++i) {
3165 cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
3166 (i % 4) == 3 ? '\n' : ' ');
3167 }
3168
3169 if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
3170 cpu_fprintf(f, "\n");
3171
3172 for (i = 0; i < 16; ++i) {
3173 cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
3174 float32_val(env->fregs[i]),
3175 *(float *)&env->fregs[i], (i % 2) == 1 ? '\n' : ' ');
3176 }
3177 }
3178 }
3179
/*
 * Restore the guest PC after a fault inside a TB: pc_pos indexes the
 * gen_opc_pc side table filled by gen_intermediate_code_pc().
 */
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}