migration: convert unix socket protocol to use QIOChannel
[qemu.git] / target-lm32 / translate.c
1 /*
2 * LatticeMico32 main translation routines.
3 *
4 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26
27 #include "exec/cpu_ldst.h"
28 #include "hw/lm32/lm32_pic.h"
29
30 #include "exec/helper-gen.h"
31
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34
35
/* Set to 0 to compile out per-instruction disassembly logging. */
#define DISAS_LM32 1
#if DISAS_LM32
/* Log only when the CPU_LOG_TB_IN_ASM log mask is enabled. */
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

/* Extract the inclusive bit field [start, end] from src. */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

/* MMU index passed to the qemu_ld/st ops for all guest accesses. */
#define MEM_INDEX 0
47
/* TCG globals mapping the architectural state in CPULM32State. */
static TCGv_env cpu_env;
static TCGv cpu_R[32];      /* general purpose registers */
static TCGv cpu_pc;         /* program counter */
static TCGv cpu_ie;         /* IE (interrupt enable) CSR */
static TCGv cpu_icc;        /* ICC CSR */
static TCGv cpu_dcc;        /* DCC CSR */
static TCGv cpu_cc;         /* CC (cycle counter) CSR */
static TCGv cpu_cfg;        /* CFG CSR */
static TCGv cpu_eba;        /* EBA (exception base address) CSR */
static TCGv cpu_dc;         /* DC CSR */
static TCGv cpu_deba;       /* DEBA (debug exception base address) CSR */
static TCGv cpu_bp[4];      /* hardware breakpoint address CSRs */
static TCGv cpu_wp[4];      /* hardware watchpoint address CSRs */
61
62 #include "exec/gen-icount.h"
63
/* Instruction encoding formats.  decode() only ever selects RI or RR;
 * CR and I are listed for completeness but are not set by the decoder. */
enum {
    OP_FMT_RI,  /* register + 16-bit immediate */
    OP_FMT_RR,  /* register + register */
    OP_FMT_CR,  /* control/status register form (unused by decode()) */
    OP_FMT_I    /* immediate-only form (unused by decode()) */
};
70
71 /* This is the state at translation time. */
/* This is the state at translation time. */
typedef struct DisasContext {
    target_ulong pc;            /* address of the insn being translated */

    /* Decoder. */
    int format;                 /* OP_FMT_* of the current insn */
    uint32_t ir;                /* raw instruction word */
    uint8_t opcode;             /* major opcode, bits 31..26 */
    uint8_t r0, r1, r2, csr;    /* decoded register / CSR fields */
    uint16_t imm5;              /* 5-bit immediate (shift amounts) */
    uint16_t imm16;             /* 16-bit immediate */
    uint32_t imm26;             /* 26-bit immediate (branch targets) */

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    int is_jmp;                 /* DISAS_* disposition of the block */

    struct TranslationBlock *tb;
    int singlestep_enabled;

    uint32_t features;          /* LM32_FEATURE_* mask of this CPU model */
    uint8_t num_breakpoints;    /* number of implemented h/w breakpoints */
    uint8_t num_watchpoints;    /* number of implemented h/w watchpoints */
} DisasContext;
95
/* Names for the TCG globals: entries 0..31 are the GPRs, 32..35 the
 * breakpoint CSRs and 36..39 the watchpoint CSRs (see
 * lm32_translate_init, which indexes this array at 32+i and 36+i). */
static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};
104
/* Return the low 'width' bits of val, upper bits cleared. */
static inline int zero_extend(unsigned int val, int width)
{
    unsigned int mask = (1 << width) - 1;

    return val & mask;
}
109
/* Sign-extend the low 'width' bits of val to a full int: shift the
 * field up so its sign bit lands in bit 31, then arithmetic-shift it
 * back down so the sign bit is replicated through the upper bits. */
static inline int sign_extend(unsigned int val, int width)
{
    int shift = 32 - width;

    return (int)(val << shift) >> shift;
}
122
/* Emit a call to the raise_exception helper with the given EXCP_* index.
 * The caller is responsible for syncing cpu_pc first if needed. */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
130
/* Sync cpu_pc to the faulting insn and emit the illegal-insn helper. */
static inline void t_gen_illegal_insn(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_helper_ill(cpu_env);
}
136
/* Return true if a direct TB-to-TB jump to 'dest' is permitted:
 * never while single-stepping, and in system mode only when the
 * destination lies on the same guest page as this TB. */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
149
/* Emit a jump to 'dest', chaining TBs via slot 'n' when allowed,
 * otherwise falling back to a plain exit (raising EXCP_DEBUG first
 * when single-stepping). */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}
164
/* add/addi.  "addi rX, r0, imm" is logged as mvi, and
 * "addi r0, r0, 0" as nop (r0 is hardwired to zero by convention). */
static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
189
/* and/andi.  The RR encoding with all register fields zero
 * ("and r0, r0, r0") is trapped and dispatched to the hlt helper. */
static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
    } else {
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            /* Halt: advance pc past this insn before calling the helper. */
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt(cpu_env);
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}
211
212 static void dec_andhi(DisasContext *dc)
213 {
214 LOG_DIS("andhi r%d, r%d, %d\n", dc->r2, dc->r0, dc->imm16);
215
216 tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
217 }
218
/* b (branch to register).  Branches through ea/ba implement exception
 * and breakpoint returns, restoring IE.IE from the saved copy. */
static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* restore IE.IE in case of an eret */
    if (dc->r0 == R_EA) {
        /* IE.IE := IE.EIE: set IE, then clear it again unless EIE was set. */
        TCGv t0 = tcg_temp_new();
        TCGLabel *l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        /* IE.IE := IE.BIE, same pattern as above for a bret. */
        TCGv t0 = tcg_temp_new();
        TCGLabel *l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}
255
/* bi: unconditional pc-relative branch; imm26 is a word offset. */
static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}
264
/* Common tail for the conditional branches: if (r0 cond r1) jump to
 * pc + scaled imm16, else fall through to the next insn.  Both exits
 * use chained TBs (slots 0 and 1). */
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    TCGLabel *l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
    dc->is_jmp = DISAS_TB_JUMP;
}
274
/* be: branch if r0 == r1. */
static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}
282
283 static void dec_bg(DisasContext *dc)
284 {
285 LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
286 sign_extend(dc->imm16, 16 * 4));
287
288 gen_cond_branch(dc, TCG_COND_GT);
289 }
290
/* bge: branch if r0 >= r1 (signed). */
static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

/* bgeu: branch if r0 >= r1 (unsigned). */
static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

/* bgu: branch if r0 > r1 (unsigned). */
static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

/* bne: branch if r0 != r1. */
static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}
322
/* call: indirect call; link address (pc + 4) goes to ra. */
static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}
332
/* calli: pc-relative call; imm26 is a word offset, link goes to ra. */
static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}
342
343 static inline void gen_compare(DisasContext *dc, int cond)
344 {
345 int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
346 int rY = (dc->format == OP_FMT_RR) ? dc->r0 : dc->r0;
347 int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;
348 int i;
349
350 if (dc->format == OP_FMT_RI) {
351 switch (cond) {
352 case TCG_COND_GEU:
353 case TCG_COND_GTU:
354 i = zero_extend(dc->imm16, 16);
355 break;
356 default:
357 i = sign_extend(dc->imm16, 16);
358 break;
359 }
360
361 tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY], i);
362 } else {
363 tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
364 }
365 }
366
/* cmpe/cmpei: set destination to (a == b). */
static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

/* cmpg/cmpgi: set destination to (a > b), signed. */
static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

/* cmpge/cmpgei: set destination to (a >= b), signed. */
static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

/* cmpgeu/cmpgeui: set destination to (a >= b), unsigned. */
static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

/* cmpgu/cmpgui: set destination to (a > b), unsigned. */
static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

/* cmpne/cmpnei: set destination to (a != b). */
static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}
438
/* divu: unsigned divide r2 = r0 / r1.  Illegal if the CPU model lacks
 * the hardware divider; raises EXCP_DIVIDE_BY_ZERO when r1 == 0. */
static void dec_divu(DisasContext *dc)
{
    TCGLabel *l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->features & LM32_FEATURE_DIVIDE)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    /* Divisor is zero: sync pc and raise the exception. */
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}
458
/* lb: load signed byte from r0 + sext(imm16) into r1. */
static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lbu: load unsigned byte from r0 + sext(imm16) into r1. */
static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lh: load signed halfword from r0 + sext(imm16) into r1. */
static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lhu: load unsigned halfword from r0 + sext(imm16) into r1. */
static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lw: load word from r0 + sext(imm16) into r1. */
static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
518
519 static void dec_modu(DisasContext *dc)
520 {
521 TCGLabel *l1;
522
523 LOG_DIS("modu r%d, r%d, %d\n", dc->r2, dc->r0, dc->r1);
524
525 if (!(dc->features & LM32_FEATURE_DIVIDE)) {
526 qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
527 t_gen_illegal_insn(dc);
528 return;
529 }
530
531 l1 = gen_new_label();
532 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
533 tcg_gen_movi_tl(cpu_pc, dc->pc);
534 t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
535 gen_set_label(l1);
536 tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
537 }
538
/* mul/muli.  Illegal if the CPU model lacks the hardware multiplier. */
static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware multiplier is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
562
/* nor/nori.  The RI form has no nori TCG op, so the immediate is
 * materialized in a temp and combined with tcg_gen_nor_tl. */
static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
581
/* or/ori.  "or rX, rY, r0" is logged as the mv pseudo-instruction. */
static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
602
/* orhi: OR the immediate into the upper halfword of r1.
 * "orhi rX, r0, imm" is logged as the mvhi pseudo-instruction. */
static void dec_orhi(DisasContext *dc)
{
    if (dc->r0 == R_R0) {
        LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
    } else {
        LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
    }

    tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}
613
614 static void dec_scall(DisasContext *dc)
615 {
616 switch (dc->imm5) {
617 case 2:
618 LOG_DIS("break\n");
619 tcg_gen_movi_tl(cpu_pc, dc->pc);
620 t_gen_raise_exception(dc, EXCP_BREAKPOINT);
621 break;
622 case 7:
623 LOG_DIS("scall\n");
624 tcg_gen_movi_tl(cpu_pc, dc->pc);
625 t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
626 break;
627 default:
628 qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x", dc->pc);
629 t_gen_illegal_insn(dc);
630 break;
631 }
632 }
633
/* rcsr: read a control/status register into r2.  Simple CSRs are
 * mirrored as TCG globals; IM/IP/JTX/JRX live behind helpers; the
 * cache-control and bp/wp CSRs are write-only and logged as errors. */
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        /* Write-only CSRs: reading them is a guest error, not illegal. */
        qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}
686
687 static void dec_sb(DisasContext *dc)
688 {
689 TCGv t0;
690
691 LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);
692
693 t0 = tcg_temp_new();
694 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
695 tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
696 tcg_temp_free(t0);
697 }
698
/* sextb: sign-extend the low byte of r0 into r2.  Illegal if the CPU
 * model lacks the hardware sign extender. */
static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

/* sexth: sign-extend the low halfword of r0 into r2.  Illegal if the
 * CPU model lacks the hardware sign extender. */
static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}
726
/* sh: store the low halfword of r1 to r0 + sext(imm16). */
static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
738
/* sl/sli: shift left.  Illegal if the CPU model lacks the hardware
 * shifter.  The register shift amount is masked to 5 bits. */
static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_SHIFT)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}
762
/* sr/sri: arithmetic shift right. */
static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* The real CPU (w/o hardware shifter) only supports right shift by exactly
     * one bit */
    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        /* Without the shifter feature: emit a run-time check that the
         * shift amount is exactly 1, otherwise raise an illegal insn.
         * NOTE(review): with the feature present, l1/l2 are set but
         * never branched to, which is harmless. */
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
800
/* sru/srui: logical shift right.  Same structure and shifter-feature
 * handling as dec_sr above. */
static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        /* Without the shifter, only a shift by exactly 1 is legal. */
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
836
/* sub: r2 = r0 - r1 (RR format only). */
static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}
843
/* sw: store the word in r1 to r0 + sext(imm16). */
static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
855
/* user: user-defined instruction slot; not implemented, so it is
 * treated as an illegal instruction. */
static void dec_user(DisasContext *dc)
{
    LOG_DIS("user");

    qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
    t_gen_illegal_insn(dc);
}
863
/* wcsr: write r1 to a control/status register.  Writes to IE/IM/IP end
 * the TB (DISAS_UPDATE) because they can change interrupt state;
 * read-only CSRs (CC, CFG) and unknown numbers are logged as guest
 * errors; bp/wp writes beyond the implemented count are illegal. */
static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr r%d, %d\n", dc->r1, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_DC:
        gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->num_breakpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "breakpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->num_watchpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "watchpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_CC:
    case CSR_CFG:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
                      dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
                      dc->csr);
        break;
    }
}
958
/* xnor/xnori.  "xnor rX, rY, r0" is logged as the not pseudo-insn.
 * The RI form is emitted as xori followed by not; the RR form maps
 * directly onto tcg_gen_eqv_tl (xnor == eqv). */
static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
980
/* xor/xori. */
static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
997
/* Decoder entry for undefined major opcodes. */
static void dec_ill(DisasContext *dc)
{
    qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
    t_gen_illegal_insn(dc);
}
1003
typedef void (*DecoderInfo)(DisasContext *dc);
/* Dispatch table indexed by the 6-bit major opcode (ir bits 31..26).
 * Entries 0..31 are the immediate (RI) encodings, 32..63 the register
 * (RR) encodings; decode() asserts the table has exactly 64 entries. */
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};
1017
/* Decode one instruction word: extract the common fields, classify the
 * format, and dispatch to the per-opcode handler. */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    /* csr and r0 occupy the same bit field; which one is meaningful
     * depends on the opcode. */
    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);

    /* bit 31 seems to indicate insn type.  */
    if (ir & (1 << 31)) {
        dc->format = OP_FMT_RR;
    } else {
        dc->format = OP_FMT_RI;
    }

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}
1046
/* generate intermediate code for basic block 'tb'.  Translates insns
 * starting at tb->pc until a branch, the TCG buffer fills, the page
 * boundary is reached, or the insn budget is spent. */
void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
    LM32CPU *cpu = lm32_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->features = cpu->features;
    dc->num_breakpoints = cpu->num_breakpoints;
    dc->num_watchpoints = cpu->num_watchpoints;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;

    /* Instructions are word-aligned; a misaligned pc is a guest error. */
    if (pc_start & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
        pc_start &= ~3;
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc, cpu_ldl_code(env, dc->pc));
        dc->pc += 4;
    } while (!dc->is_jmp
         && !tcg_op_buf_full()
         && !cs->singlestep_enabled
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Close the TB according to how translation stopped. */
    if (unlikely(cs->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
}
1158
/* Dump the architectural CPU state (pc, IE bits, IM/IP, bases, GPRs)
 * to the given stream, for the monitor / -d cpu logging. */
void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    LM32CPU *cpu = LM32_CPU(cs);
    CPULM32State *env = &cpu->env;
    int i;

    /* NOTE(review): env cannot be NULL here (it is &cpu->env); only the
     * f check is meaningful. */
    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->pc, lookup_symbol(env->pc));

    cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
             env->ie,
             (env->ie & IE_IE) ? 1 : 0,
             (env->ie & IE_EIE) ? 1 : 0,
             (env->ie & IE_BIE) ? 1 : 0,
             lm32_pic_get_im(env->pic_state),
             lm32_pic_get_ip(env->pic_state));
    cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
             env->eba,
             env->deba);

    /* GPRs, four per line. */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
1192
/* Restore CPU state after a fault; data[0] is the pc recorded by
 * tcg_gen_insn_start in gen_intermediate_code. */
void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
1198
/* One-time TCG frontend initialization: create the TCG globals backing
 * the fields of CPULM32State (GPRs, bp/wp CSRs, and the simple CSRs). */
void lm32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* General purpose registers. */
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPULM32State, regs[i]),
                          regnames[i]);
    }

    /* Breakpoint address CSRs; names start at regnames[32]. */
    for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
        cpu_bp[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPULM32State, bp[i]),
                          regnames[32+i]);
    }

    /* Watchpoint address CSRs; names start at regnames[36]. */
    for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
        cpu_wp[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPULM32State, wp[i]),
                          regnames[36+i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, pc),
                    "pc");
    cpu_ie = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, ie),
                    "ie");
    cpu_icc = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, icc),
                    "icc");
    cpu_dcc = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, dcc),
                    "dcc");
    cpu_cc = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, cc),
                    "cc");
    cpu_cfg = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, cfg),
                    "cfg");
    cpu_eba = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, eba),
                    "eba");
    cpu_dc = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, dc),
                    "dc");
    cpu_deba = tcg_global_mem_new(cpu_env,
                    offsetof(CPULM32State, deba),
                    "deba");
}
1251