[qemu.git] / linux-user / main.c
1 /*
2 * qemu user main
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu-version.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu/path.h"
26 #include "qemu/cutils.h"
27 #include "qemu/help_option.h"
28 #include "cpu.h"
29 #include "exec/exec-all.h"
30 #include "tcg.h"
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
33 #include "elf.h"
34 #include "exec/log.h"
35
36 char *exec_path;
37
38 int singlestep;
39 static const char *filename;
40 static const char *argv0;
41 static int gdbstub_port;
42 static envlist_t *envlist;
43 static const char *cpu_model;
44 unsigned long mmap_min_addr;
45 unsigned long guest_base;
46 int have_guest_base;
47
48 #define EXCP_DUMP(env, fmt, ...) \
49 do { \
50 CPUState *cs = ENV_GET_CPU(env); \
51 fprintf(stderr, fmt , ## __VA_ARGS__); \
52 cpu_dump_state(cs, stderr, fprintf, 0); \
53 if (qemu_log_separate()) { \
54 qemu_log(fmt, ## __VA_ARGS__); \
55 log_cpu_state(cs, 0); \
56 } \
57 } while (0)
58
59 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
60 /*
61 * When running 32-on-64 we should make sure we can fit all of the possible
62 * guest address space into a contiguous chunk of virtual host memory.
63 *
64 * This way we will never overlap with our own libraries or binaries or stack
65 * or anything else that QEMU maps.
66 */
67 # ifdef TARGET_MIPS
68 /* MIPS only supports 31 bits of virtual address space for user space */
69 unsigned long reserved_va = 0x77000000;
70 # else
71 unsigned long reserved_va = 0xf7000000;
72 # endif
73 #else
74 unsigned long reserved_va;
75 #endif
76
77 static void usage(int exitcode);
78
79 static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
80 const char *qemu_uname_release;
81
82 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
83 we allocate a bigger stack. Need a better solution, for example
84 by remapping the process stack directly at the right place */
85 unsigned long guest_stack_size = 8 * 1024 * 1024UL;
86
87 void gemu_log(const char *fmt, ...)
88 {
89 va_list ap;
90
91 va_start(ap, fmt);
92 vfprintf(stderr, fmt, ap);
93 va_end(ap);
94 }
95
96 #if defined(TARGET_I386)
97 int cpu_get_pic_interrupt(CPUX86State *env)
98 {
99 return -1;
100 }
101 #endif
102
103 /***********************************************************/
104 /* Helper routines for implementing atomic operations. */
105
106 /* To implement exclusive operations we force all cpus to synchronise.
107 We don't require a full sync, only that no cpus are executing guest code.
108 The alternative is to map target atomic ops onto host equivalents,
109 which requires quite a lot of per host/target work. */
110 static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
111 static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
112 static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
113 static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
114 static int pending_cpus;
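/*
 * pending_cpus tracks the exclusive-operation protocol implemented below:
 *   0  - no exclusive operation is in progress;
 *   1  - an exclusive operation is running and no other cpu is executing
 *        guest code;
 *   >1 - start_exclusive() is still waiting for (pending_cpus - 1) cpus to
 *        leave guest code; each of them decrements the count in
 *        cpu_exec_end() and the last one signals exclusive_cond.
 * end_exclusive() resets the count to 0 and broadcasts exclusive_resume so
 * that cpus blocked in exclusive_idle() can continue.
 */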
115
116 /* Make sure everything is in a consistent state for calling fork(). */
117 void fork_start(void)
118 {
119 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
120 pthread_mutex_lock(&exclusive_lock);
121 mmap_fork_start();
122 }
123
124 void fork_end(int child)
125 {
126 mmap_fork_end(child);
127 if (child) {
128 CPUState *cpu, *next_cpu;
129 /* Child processes created by fork() only have a single thread.
130 Discard information about the parent threads. */
131 CPU_FOREACH_SAFE(cpu, next_cpu) {
132 if (cpu != thread_cpu) {
133 QTAILQ_REMOVE(&cpus, cpu, node);
134 }
135 }
136 pending_cpus = 0;
137 pthread_mutex_init(&exclusive_lock, NULL);
138 pthread_mutex_init(&cpu_list_mutex, NULL);
139 pthread_cond_init(&exclusive_cond, NULL);
140 pthread_cond_init(&exclusive_resume, NULL);
141 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
142 gdbserver_fork(thread_cpu);
143 } else {
144 pthread_mutex_unlock(&exclusive_lock);
145 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
146 }
147 }
148
149 /* Wait for pending exclusive operations to complete. The exclusive lock
150 must be held. */
151 static inline void exclusive_idle(void)
152 {
153 while (pending_cpus) {
154 pthread_cond_wait(&exclusive_resume, &exclusive_lock);
155 }
156 }
157
158 /* Start an exclusive operation.
159 Must only be called from outside cpu_arm_exec. */
160 static inline void start_exclusive(void)
161 {
162 CPUState *other_cpu;
163
164 pthread_mutex_lock(&exclusive_lock);
165 exclusive_idle();
166
167 pending_cpus = 1;
168 /* Make all other cpus stop executing. */
169 CPU_FOREACH(other_cpu) {
170 if (other_cpu->running) {
171 pending_cpus++;
172 cpu_exit(other_cpu);
173 }
174 }
175 if (pending_cpus > 1) {
176 pthread_cond_wait(&exclusive_cond, &exclusive_lock);
177 }
178 }
179
180 /* Finish an exclusive operation. */
181 static inline void __attribute__((unused)) end_exclusive(void)
182 {
183 pending_cpus = 0;
184 pthread_cond_broadcast(&exclusive_resume);
185 pthread_mutex_unlock(&exclusive_lock);
186 }
187
188 /* Wait for exclusive ops to finish, and begin cpu execution. */
189 static inline void cpu_exec_start(CPUState *cpu)
190 {
191 pthread_mutex_lock(&exclusive_lock);
192 exclusive_idle();
193 cpu->running = true;
194 pthread_mutex_unlock(&exclusive_lock);
195 }
196
197 /* Mark cpu as not executing, and release pending exclusive ops. */
198 static inline void cpu_exec_end(CPUState *cpu)
199 {
200 pthread_mutex_lock(&exclusive_lock);
201 cpu->running = false;
202 if (pending_cpus > 1) {
203 pending_cpus--;
204 if (pending_cpus == 1) {
205 pthread_cond_signal(&exclusive_cond);
206 }
207 }
208 exclusive_idle();
209 pthread_mutex_unlock(&exclusive_lock);
210 }
211
212 void cpu_list_lock(void)
213 {
214 pthread_mutex_lock(&cpu_list_mutex);
215 }
216
217 void cpu_list_unlock(void)
218 {
219 pthread_mutex_unlock(&cpu_list_mutex);
220 }
221
222
223 #ifdef TARGET_I386
224 /***********************************************************/
225 /* CPUX86 core interface */
226
227 uint64_t cpu_get_tsc(CPUX86State *env)
228 {
229 return cpu_get_host_ticks();
230 }
231
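/*
 * Pack one segment descriptor entry in the standard x86 layout: 'e1' holds
 * base[15:0] in its upper half and limit[15:0] in its lower half; 'e2'
 * holds base[23:16], base[31:24], limit[19:16] and the access/flag bits
 * passed in 'flags'.
 */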
232 static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
233 int flags)
234 {
235 unsigned int e1, e2;
236 uint32_t *p;
237 e1 = (addr << 16) | (limit & 0xffff);
238 e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
239 e2 |= flags;
240 p = ptr;
241 p[0] = tswap32(e1);
242 p[1] = tswap32(e2);
243 }
244
245 static uint64_t *idt_table;
246 #ifdef TARGET_X86_64
247 static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
248 uint64_t addr, unsigned int sel)
249 {
250 uint32_t *p, e1, e2;
251 e1 = (addr & 0xffff) | (sel << 16);
252 e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
253 p = ptr;
254 p[0] = tswap32(e1);
255 p[1] = tswap32(e2);
256 p[2] = tswap32(addr >> 32);
257 p[3] = 0;
258 }
259 /* only dpl matters as we do only user space emulation */
260 static void set_idt(int n, unsigned int dpl)
261 {
262 set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
263 }
264 #else
265 static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
266 uint32_t addr, unsigned int sel)
267 {
268 uint32_t *p, e1, e2;
269 e1 = (addr & 0xffff) | (sel << 16);
270 e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
271 p = ptr;
272 p[0] = tswap32(e1);
273 p[1] = tswap32(e2);
274 }
275
276 /* only dpl matters as we do only user space emulation */
277 static void set_idt(int n, unsigned int dpl)
278 {
279 set_gate(idt_table + n, 0, dpl, 0, 0);
280 }
281 #endif
282
283 void cpu_loop(CPUX86State *env)
284 {
285 CPUState *cs = CPU(x86_env_get_cpu(env));
286 int trapnr;
287 abi_ulong pc;
288 abi_ulong ret;
289 target_siginfo_t info;
290
291 for(;;) {
292 cpu_exec_start(cs);
293 trapnr = cpu_x86_exec(cs);
294 cpu_exec_end(cs);
295 switch(trapnr) {
296 case 0x80:
297 /* linux syscall from int $0x80 */
298 ret = do_syscall(env,
299 env->regs[R_EAX],
300 env->regs[R_EBX],
301 env->regs[R_ECX],
302 env->regs[R_EDX],
303 env->regs[R_ESI],
304 env->regs[R_EDI],
305 env->regs[R_EBP],
306 0, 0);
307 if (ret == -TARGET_ERESTARTSYS) {
308 env->eip -= 2;
309 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
310 env->regs[R_EAX] = ret;
311 }
312 break;
313 #ifndef TARGET_ABI32
314 case EXCP_SYSCALL:
315 /* linux syscall from syscall instruction */
316 ret = do_syscall(env,
317 env->regs[R_EAX],
318 env->regs[R_EDI],
319 env->regs[R_ESI],
320 env->regs[R_EDX],
321 env->regs[10],
322 env->regs[8],
323 env->regs[9],
324 0, 0);
325 if (ret == -TARGET_ERESTARTSYS) {
326 env->eip -= 2;
327 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
328 env->regs[R_EAX] = ret;
329 }
330 break;
331 #endif
332 case EXCP0B_NOSEG:
333 case EXCP0C_STACK:
334 info.si_signo = TARGET_SIGBUS;
335 info.si_errno = 0;
336 info.si_code = TARGET_SI_KERNEL;
337 info._sifields._sigfault._addr = 0;
338 queue_signal(env, info.si_signo, &info);
339 break;
340 case EXCP0D_GPF:
341 /* XXX: potential problem if ABI32 */
342 #ifndef TARGET_X86_64
343 if (env->eflags & VM_MASK) {
344 handle_vm86_fault(env);
345 } else
346 #endif
347 {
348 info.si_signo = TARGET_SIGSEGV;
349 info.si_errno = 0;
350 info.si_code = TARGET_SI_KERNEL;
351 info._sifields._sigfault._addr = 0;
352 queue_signal(env, info.si_signo, &info);
353 }
354 break;
355 case EXCP0E_PAGE:
356 info.si_signo = TARGET_SIGSEGV;
357 info.si_errno = 0;
358 if (!(env->error_code & 1))
359 info.si_code = TARGET_SEGV_MAPERR;
360 else
361 info.si_code = TARGET_SEGV_ACCERR;
362 info._sifields._sigfault._addr = env->cr[2];
363 queue_signal(env, info.si_signo, &info);
364 break;
365 case EXCP00_DIVZ:
366 #ifndef TARGET_X86_64
367 if (env->eflags & VM_MASK) {
368 handle_vm86_trap(env, trapnr);
369 } else
370 #endif
371 {
372 /* division by zero */
373 info.si_signo = TARGET_SIGFPE;
374 info.si_errno = 0;
375 info.si_code = TARGET_FPE_INTDIV;
376 info._sifields._sigfault._addr = env->eip;
377 queue_signal(env, info.si_signo, &info);
378 }
379 break;
380 case EXCP01_DB:
381 case EXCP03_INT3:
382 #ifndef TARGET_X86_64
383 if (env->eflags & VM_MASK) {
384 handle_vm86_trap(env, trapnr);
385 } else
386 #endif
387 {
388 info.si_signo = TARGET_SIGTRAP;
389 info.si_errno = 0;
390 if (trapnr == EXCP01_DB) {
391 info.si_code = TARGET_TRAP_BRKPT;
392 info._sifields._sigfault._addr = env->eip;
393 } else {
394 info.si_code = TARGET_SI_KERNEL;
395 info._sifields._sigfault._addr = 0;
396 }
397 queue_signal(env, info.si_signo, &info);
398 }
399 break;
400 case EXCP04_INTO:
401 case EXCP05_BOUND:
402 #ifndef TARGET_X86_64
403 if (env->eflags & VM_MASK) {
404 handle_vm86_trap(env, trapnr);
405 } else
406 #endif
407 {
408 info.si_signo = TARGET_SIGSEGV;
409 info.si_errno = 0;
410 info.si_code = TARGET_SI_KERNEL;
411 info._sifields._sigfault._addr = 0;
412 queue_signal(env, info.si_signo, &info);
413 }
414 break;
415 case EXCP06_ILLOP:
416 info.si_signo = TARGET_SIGILL;
417 info.si_errno = 0;
418 info.si_code = TARGET_ILL_ILLOPN;
419 info._sifields._sigfault._addr = env->eip;
420 queue_signal(env, info.si_signo, &info);
421 break;
422 case EXCP_INTERRUPT:
423 /* just indicate that signals should be handled asap */
424 break;
425 case EXCP_DEBUG:
426 {
427 int sig;
428
429 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
430 if (sig)
431 {
432 info.si_signo = sig;
433 info.si_errno = 0;
434 info.si_code = TARGET_TRAP_BRKPT;
435 queue_signal(env, info.si_signo, &info);
436 }
437 }
438 break;
439 default:
440 pc = env->segs[R_CS].base + env->eip;
441 EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
442 (long)pc, trapnr);
443 abort();
444 }
445 process_pending_signals(env);
446 }
447 }
448 #endif
449
450 #ifdef TARGET_ARM
451
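/*
 * The macros below wrap get_user/put_user for guest code and data accesses.
 * The *_code_* variants byte-reverse the value read when BE32-style code
 * fetches require it (bswap_code/arm_sctlr_b), and the *_data_* variants do
 * the same when arm_cpu_bswap_data() says data accesses need swapping.
 * Each macro evaluates to the usual 0-on-success / non-zero-on-fault result
 * of the underlying access.
 */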
452 #define get_user_code_u32(x, gaddr, env) \
453 ({ abi_long __r = get_user_u32((x), (gaddr)); \
454 if (!__r && bswap_code(arm_sctlr_b(env))) { \
455 (x) = bswap32(x); \
456 } \
457 __r; \
458 })
459
460 #define get_user_code_u16(x, gaddr, env) \
461 ({ abi_long __r = get_user_u16((x), (gaddr)); \
462 if (!__r && bswap_code(arm_sctlr_b(env))) { \
463 (x) = bswap16(x); \
464 } \
465 __r; \
466 })
467
468 #define get_user_data_u32(x, gaddr, env) \
469 ({ abi_long __r = get_user_u32((x), (gaddr)); \
470 if (!__r && arm_cpu_bswap_data(env)) { \
471 (x) = bswap32(x); \
472 } \
473 __r; \
474 })
475
476 #define get_user_data_u16(x, gaddr, env) \
477 ({ abi_long __r = get_user_u16((x), (gaddr)); \
478 if (!__r && arm_cpu_bswap_data(env)) { \
479 (x) = bswap16(x); \
480 } \
481 __r; \
482 })
483
484 #define put_user_data_u32(x, gaddr, env) \
485 ({ typeof(x) __x = (x); \
486 if (arm_cpu_bswap_data(env)) { \
487 __x = bswap32(__x); \
488 } \
489 put_user_u32(__x, (gaddr)); \
490 })
491
492 #define put_user_data_u16(x, gaddr, env) \
493 ({ typeof(x) __x = (x); \
494 if (arm_cpu_bswap_data(env)) { \
495 __x = bswap16(__x); \
496 } \
497 put_user_u16(__x, (gaddr)); \
498 })
499
500 #ifdef TARGET_ABI32
501 /* Commpage handling -- there is no commpage for AArch64 */
502
503 /*
504 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
505 * Input:
506 * r0 = pointer to oldval
507 * r1 = pointer to newval
508 * r2 = pointer to target value
509 *
510 * Output:
511 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
512 * C set if *ptr was changed, clear if no exchange happened
513 *
514 * Note that SEGVs in kernel helpers are a bit tricky: we can set the
515 * data address sensibly, but the PC address is just the entry point.
516 */
517 static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
518 {
519 uint64_t oldval, newval, val;
520 uint32_t addr, cpsr;
521 target_siginfo_t info;
522
523 /* Based on the 32 bit code in do_kernel_trap */
524
525 /* XXX: This only works between threads, not between processes.
526 It's probably possible to implement this with native host
527 operations. However things like ldrex/strex are much harder so
528 there's not much point trying. */
529 start_exclusive();
530 cpsr = cpsr_read(env);
531 addr = env->regs[2];
532
533 if (get_user_u64(oldval, env->regs[0])) {
534 env->exception.vaddress = env->regs[0];
535 goto segv;
536 };
537
538 if (get_user_u64(newval, env->regs[1])) {
539 env->exception.vaddress = env->regs[1];
540 goto segv;
541 };
542
543 if (get_user_u64(val, addr)) {
544 env->exception.vaddress = addr;
545 goto segv;
546 }
547
548 if (val == oldval) {
549 val = newval;
550
551 if (put_user_u64(val, addr)) {
552 env->exception.vaddress = addr;
553 goto segv;
554 };
555
556 env->regs[0] = 0;
557 cpsr |= CPSR_C;
558 } else {
559 env->regs[0] = -1;
560 cpsr &= ~CPSR_C;
561 }
562 cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
563 end_exclusive();
564 return;
565
566 segv:
567 end_exclusive();
568 /* We report the PC of the entry point, which is as good as anything;
569 on a real kernel what you get depends on which mode it uses. */
570 info.si_signo = TARGET_SIGSEGV;
571 info.si_errno = 0;
572 /* XXX: check env->error_code */
573 info.si_code = TARGET_SEGV_MAPERR;
574 info._sifields._sigfault._addr = env->exception.vaddress;
575 queue_signal(env, info.si_signo, &info);
576 }
577
578 /* Handle a jump to the kernel code page. */
579 static int
580 do_kernel_trap(CPUARMState *env)
581 {
582 uint32_t addr;
583 uint32_t cpsr;
584 uint32_t val;
585
586 switch (env->regs[15]) {
587 case 0xffff0fa0: /* __kernel_memory_barrier */
588 /* ??? No-op. Will need to do better for SMP. */
589 break;
590 case 0xffff0fc0: /* __kernel_cmpxchg */
591 /* XXX: This only works between threads, not between processes.
592 It's probably possible to implement this with native host
593 operations. However things like ldrex/strex are much harder so
594 there's not much point trying. */
595 start_exclusive();
596 cpsr = cpsr_read(env);
597 addr = env->regs[2];
598 /* FIXME: This should SEGV if the access fails. */
599 if (get_user_u32(val, addr))
600 val = ~env->regs[0];
601 if (val == env->regs[0]) {
602 val = env->regs[1];
603 /* FIXME: Check for segfaults. */
604 put_user_u32(val, addr);
605 env->regs[0] = 0;
606 cpsr |= CPSR_C;
607 } else {
608 env->regs[0] = -1;
609 cpsr &= ~CPSR_C;
610 }
611 cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
612 end_exclusive();
613 break;
614 case 0xffff0fe0: /* __kernel_get_tls */
615 env->regs[0] = cpu_get_tls(env);
616 break;
617 case 0xffff0f60: /* __kernel_cmpxchg64 */
618 arm_kernel_cmpxchg64_helper(env);
619 break;
620
621 default:
622 return 1;
623 }
624 /* Jump back to the caller. */
625 addr = env->regs[14];
626 if (addr & 1) {
627 env->thumb = 1;
628 addr &= ~1;
629 }
630 env->regs[15] = addr;
631
632 return 0;
633 }
634
635 /* Store exclusive handling for AArch32 */
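/*
 * env->exclusive_info packs the details of the interrupted store-exclusive,
 * as decoded below: bits [3:0] are the access size (3 meaning a 64-bit pair
 * handled as two 32-bit words), bits [7:4] the register receiving the
 * status result, bits [11:8] the register holding the value to store and
 * bits [15:12] the high-half register for the pair case.
 */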
636 static int do_strex(CPUARMState *env)
637 {
638 uint64_t val;
639 int size;
640 int rc = 1;
641 int segv = 0;
642 uint32_t addr;
643 start_exclusive();
644 if (env->exclusive_addr != env->exclusive_test) {
645 goto fail;
646 }
647 /* We know we're always AArch32 so the address is in uint32_t range
648 * unless it was the -1 exclusive-monitor-lost value (which won't
649 * match exclusive_test above).
650 */
651 assert(extract64(env->exclusive_addr, 32, 32) == 0);
652 addr = env->exclusive_addr;
653 size = env->exclusive_info & 0xf;
654 switch (size) {
655 case 0:
656 segv = get_user_u8(val, addr);
657 break;
658 case 1:
659 segv = get_user_data_u16(val, addr, env);
660 break;
661 case 2:
662 case 3:
663 segv = get_user_data_u32(val, addr, env);
664 break;
665 default:
666 abort();
667 }
668 if (segv) {
669 env->exception.vaddress = addr;
670 goto done;
671 }
672 if (size == 3) {
673 uint32_t valhi;
674 segv = get_user_data_u32(valhi, addr + 4, env);
675 if (segv) {
676 env->exception.vaddress = addr + 4;
677 goto done;
678 }
679 if (arm_cpu_bswap_data(env)) {
680 val = deposit64((uint64_t)valhi, 32, 32, val);
681 } else {
682 val = deposit64(val, 32, 32, valhi);
683 }
684 }
685 if (val != env->exclusive_val) {
686 goto fail;
687 }
688
689 val = env->regs[(env->exclusive_info >> 8) & 0xf];
690 switch (size) {
691 case 0:
692 segv = put_user_u8(val, addr);
693 break;
694 case 1:
695 segv = put_user_data_u16(val, addr, env);
696 break;
697 case 2:
698 case 3:
699 segv = put_user_data_u32(val, addr, env);
700 break;
701 }
702 if (segv) {
703 env->exception.vaddress = addr;
704 goto done;
705 }
706 if (size == 3) {
707 val = env->regs[(env->exclusive_info >> 12) & 0xf];
708 segv = put_user_data_u32(val, addr + 4, env);
709 if (segv) {
710 env->exception.vaddress = addr + 4;
711 goto done;
712 }
713 }
714 rc = 0;
715 fail:
716 env->regs[15] += 4;
717 env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
718 done:
719 end_exclusive();
720 return segv;
721 }
722
723 void cpu_loop(CPUARMState *env)
724 {
725 CPUState *cs = CPU(arm_env_get_cpu(env));
726 int trapnr;
727 unsigned int n, insn;
728 target_siginfo_t info;
729 uint32_t addr;
730 abi_ulong ret;
731
732 for(;;) {
733 cpu_exec_start(cs);
734 trapnr = cpu_arm_exec(cs);
735 cpu_exec_end(cs);
736 switch(trapnr) {
737 case EXCP_UDEF:
738 {
739 TaskState *ts = cs->opaque;
740 uint32_t opcode;
741 int rc;
742
743 /* we handle the FPU emulation here, as the Linux kernel does */
744 /* we get the opcode */
745 /* FIXME - what to do if get_user() fails? */
746 get_user_code_u32(opcode, env->regs[15], env);
747
748 rc = EmulateAll(opcode, &ts->fpa, env);
749 if (rc == 0) { /* illegal instruction */
750 info.si_signo = TARGET_SIGILL;
751 info.si_errno = 0;
752 info.si_code = TARGET_ILL_ILLOPN;
753 info._sifields._sigfault._addr = env->regs[15];
754 queue_signal(env, info.si_signo, &info);
755 } else if (rc < 0) { /* FP exception */
756 int arm_fpe=0;
757
758 /* translate softfloat flags to FPSR flags */
759 if (-rc & float_flag_invalid)
760 arm_fpe |= BIT_IOC;
761 if (-rc & float_flag_divbyzero)
762 arm_fpe |= BIT_DZC;
763 if (-rc & float_flag_overflow)
764 arm_fpe |= BIT_OFC;
765 if (-rc & float_flag_underflow)
766 arm_fpe |= BIT_UFC;
767 if (-rc & float_flag_inexact)
768 arm_fpe |= BIT_IXC;
769
770 FPSR fpsr = ts->fpa.fpsr;
771 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
772
773 if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
774 info.si_signo = TARGET_SIGFPE;
775 info.si_errno = 0;
776
777 /* ordered by priority, least first */
778 if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
779 if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
780 if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
781 if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
782 if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
783
784 info._sifields._sigfault._addr = env->regs[15];
785 queue_signal(env, info.si_signo, &info);
786 } else {
787 env->regs[15] += 4;
788 }
789
790 /* accumulate unenabled exceptions */
791 if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
792 fpsr |= BIT_IXC;
793 if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
794 fpsr |= BIT_UFC;
795 if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
796 fpsr |= BIT_OFC;
797 if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
798 fpsr |= BIT_DZC;
799 if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
800 fpsr |= BIT_IOC;
801 ts->fpa.fpsr=fpsr;
802 } else { /* everything OK */
803 /* increment PC */
804 env->regs[15] += 4;
805 }
806 }
807 break;
808 case EXCP_SWI:
809 case EXCP_BKPT:
810 {
811 env->eabi = 1;
812 /* system call */
813 if (trapnr == EXCP_BKPT) {
814 if (env->thumb) {
815 /* FIXME - what to do if get_user() fails? */
816 get_user_code_u16(insn, env->regs[15], env);
817 n = insn & 0xff;
818 env->regs[15] += 2;
819 } else {
820 /* FIXME - what to do if get_user() fails? */
821 get_user_code_u32(insn, env->regs[15], env);
822 n = (insn & 0xf) | ((insn >> 4) & 0xff0);
823 env->regs[15] += 4;
824 }
825 } else {
826 if (env->thumb) {
827 /* FIXME - what to do if get_user() fails? */
828 get_user_code_u16(insn, env->regs[15] - 2, env);
829 n = insn & 0xff;
830 } else {
831 /* FIXME - what to do if get_user() fails? */
832 get_user_code_u32(insn, env->regs[15] - 4, env);
833 n = insn & 0xffffff;
834 }
835 }
836
837 if (n == ARM_NR_cacheflush) {
838 /* nop */
839 } else if (n == ARM_NR_semihosting
840 || n == ARM_NR_thumb_semihosting) {
841 env->regs[0] = do_arm_semihosting (env);
842 } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
843 /* linux syscall */
844 if (env->thumb || n == 0) {
845 n = env->regs[7];
846 } else {
847 n -= ARM_SYSCALL_BASE;
848 env->eabi = 0;
849 }
850 if ( n > ARM_NR_BASE) {
851 switch (n) {
852 case ARM_NR_cacheflush:
853 /* nop */
854 break;
855 case ARM_NR_set_tls:
856 cpu_set_tls(env, env->regs[0]);
857 env->regs[0] = 0;
858 break;
859 case ARM_NR_breakpoint:
860 env->regs[15] -= env->thumb ? 2 : 4;
861 goto excp_debug;
862 default:
863 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
864 n);
865 env->regs[0] = -TARGET_ENOSYS;
866 break;
867 }
868 } else {
869 ret = do_syscall(env,
870 n,
871 env->regs[0],
872 env->regs[1],
873 env->regs[2],
874 env->regs[3],
875 env->regs[4],
876 env->regs[5],
877 0, 0);
878 if (ret == -TARGET_ERESTARTSYS) {
879 env->regs[15] -= env->thumb ? 2 : 4;
880 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
881 env->regs[0] = ret;
882 }
883 }
884 } else {
885 goto error;
886 }
887 }
888 break;
889 case EXCP_INTERRUPT:
890 /* just indicate that signals should be handled asap */
891 break;
892 case EXCP_STREX:
893 if (!do_strex(env)) {
894 break;
895 }
896 /* fall through for segv */
897 case EXCP_PREFETCH_ABORT:
898 case EXCP_DATA_ABORT:
899 addr = env->exception.vaddress;
900 {
901 info.si_signo = TARGET_SIGSEGV;
902 info.si_errno = 0;
903 /* XXX: check env->error_code */
904 info.si_code = TARGET_SEGV_MAPERR;
905 info._sifields._sigfault._addr = addr;
906 queue_signal(env, info.si_signo, &info);
907 }
908 break;
909 case EXCP_DEBUG:
910 excp_debug:
911 {
912 int sig;
913
914 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
915 if (sig)
916 {
917 info.si_signo = sig;
918 info.si_errno = 0;
919 info.si_code = TARGET_TRAP_BRKPT;
920 queue_signal(env, info.si_signo, &info);
921 }
922 }
923 break;
924 case EXCP_KERNEL_TRAP:
925 if (do_kernel_trap(env))
926 goto error;
927 break;
928 case EXCP_YIELD:
929 /* nothing to do here for user-mode, just resume guest code */
930 break;
931 default:
932 error:
933 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
934 abort();
935 }
936 process_pending_signals(env);
937 }
938 }
939
940 #else
941
942 /*
943 * Handle AArch64 store-release exclusive
944 *
945 * rs = the register that receives the status result of the store exclusive
946 * rt = the register that is stored
947 * rt2 = the second register stored (for the pair forms)
948 *
949 */
950 static int do_strex_a64(CPUARMState *env)
951 {
952 uint64_t val;
953 int size;
954 bool is_pair;
955 int rc = 1;
956 int segv = 0;
957 uint64_t addr;
958 int rs, rt, rt2;
959
960 start_exclusive();
961 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14) */
962 size = extract32(env->exclusive_info, 0, 2);
963 is_pair = extract32(env->exclusive_info, 2, 1);
964 rs = extract32(env->exclusive_info, 4, 5);
965 rt = extract32(env->exclusive_info, 9, 5);
966 rt2 = extract32(env->exclusive_info, 14, 5);
967
968 addr = env->exclusive_addr;
969
970 if (addr != env->exclusive_test) {
971 goto finish;
972 }
973
974 switch (size) {
975 case 0:
976 segv = get_user_u8(val, addr);
977 break;
978 case 1:
979 segv = get_user_u16(val, addr);
980 break;
981 case 2:
982 segv = get_user_u32(val, addr);
983 break;
984 case 3:
985 segv = get_user_u64(val, addr);
986 break;
987 default:
988 abort();
989 }
990 if (segv) {
991 env->exception.vaddress = addr;
992 goto error;
993 }
994 if (val != env->exclusive_val) {
995 goto finish;
996 }
997 if (is_pair) {
998 if (size == 2) {
999 segv = get_user_u32(val, addr + 4);
1000 } else {
1001 segv = get_user_u64(val, addr + 8);
1002 }
1003 if (segv) {
1004 env->exception.vaddress = addr + (size == 2 ? 4 : 8);
1005 goto error;
1006 }
1007 if (val != env->exclusive_high) {
1008 goto finish;
1009 }
1010 }
1011 /* handle the zero register */
1012 val = rt == 31 ? 0 : env->xregs[rt];
1013 switch (size) {
1014 case 0:
1015 segv = put_user_u8(val, addr);
1016 break;
1017 case 1:
1018 segv = put_user_u16(val, addr);
1019 break;
1020 case 2:
1021 segv = put_user_u32(val, addr);
1022 break;
1023 case 3:
1024 segv = put_user_u64(val, addr);
1025 break;
1026 }
1027 if (segv) {
1028 goto error;
1029 }
1030 if (is_pair) {
1031 /* handle the zero register */
1032 val = rt2 == 31 ? 0 : env->xregs[rt2];
1033 if (size == 2) {
1034 segv = put_user_u32(val, addr + 4);
1035 } else {
1036 segv = put_user_u64(val, addr + 8);
1037 }
1038 if (segv) {
1039 env->exception.vaddress = addr + (size == 2 ? 4 : 8);
1040 goto error;
1041 }
1042 }
1043 rc = 0;
1044 finish:
1045 env->pc += 4;
1046 /* rs == 31 encodes a write to the ZR, thus throwing away
1047 * the status return. This is rather silly but valid.
1048 */
1049 if (rs < 31) {
1050 env->xregs[rs] = rc;
1051 }
1052 error:
1053 /* instruction faulted, PC does not advance */
1054 /* either way a strex releases any exclusive lock we have */
1055 env->exclusive_addr = -1;
1056 end_exclusive();
1057 return segv;
1058 }
1059
1060 /* AArch64 main loop */
1061 void cpu_loop(CPUARMState *env)
1062 {
1063 CPUState *cs = CPU(arm_env_get_cpu(env));
1064 int trapnr, sig;
1065 abi_long ret;
1066 target_siginfo_t info;
1067
1068 for (;;) {
1069 cpu_exec_start(cs);
1070 trapnr = cpu_arm_exec(cs);
1071 cpu_exec_end(cs);
1072
1073 switch (trapnr) {
1074 case EXCP_SWI:
1075 ret = do_syscall(env,
1076 env->xregs[8],
1077 env->xregs[0],
1078 env->xregs[1],
1079 env->xregs[2],
1080 env->xregs[3],
1081 env->xregs[4],
1082 env->xregs[5],
1083 0, 0);
1084 if (ret == -TARGET_ERESTARTSYS) {
1085 env->pc -= 4;
1086 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
1087 env->xregs[0] = ret;
1088 }
1089 break;
1090 case EXCP_INTERRUPT:
1091 /* just indicate that signals should be handled asap */
1092 break;
1093 case EXCP_UDEF:
1094 info.si_signo = TARGET_SIGILL;
1095 info.si_errno = 0;
1096 info.si_code = TARGET_ILL_ILLOPN;
1097 info._sifields._sigfault._addr = env->pc;
1098 queue_signal(env, info.si_signo, &info);
1099 break;
1100 case EXCP_STREX:
1101 if (!do_strex_a64(env)) {
1102 break;
1103 }
1104 /* fall through for segv */
1105 case EXCP_PREFETCH_ABORT:
1106 case EXCP_DATA_ABORT:
1107 info.si_signo = TARGET_SIGSEGV;
1108 info.si_errno = 0;
1109 /* XXX: check env->error_code */
1110 info.si_code = TARGET_SEGV_MAPERR;
1111 info._sifields._sigfault._addr = env->exception.vaddress;
1112 queue_signal(env, info.si_signo, &info);
1113 break;
1114 case EXCP_DEBUG:
1115 case EXCP_BKPT:
1116 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1117 if (sig) {
1118 info.si_signo = sig;
1119 info.si_errno = 0;
1120 info.si_code = TARGET_TRAP_BRKPT;
1121 queue_signal(env, info.si_signo, &info);
1122 }
1123 break;
1124 case EXCP_SEMIHOST:
1125 env->xregs[0] = do_arm_semihosting(env);
1126 break;
1127 case EXCP_YIELD:
1128 /* nothing to do here for user-mode, just resume guest code */
1129 break;
1130 default:
1131 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
1132 abort();
1133 }
1134 process_pending_signals(env);
1135 /* Exception return on AArch64 always clears the exclusive monitor,
1136 * so any return to running guest code implies this.
1137 * A strex (successful or otherwise) also clears the monitor, so
1138 * we don't need to specialcase EXCP_STREX.
1139 */
1140 env->exclusive_addr = -1;
1141 }
1142 }
1143 #endif /* ndef TARGET_ABI32 */
1144
1145 #endif
1146
1147 #ifdef TARGET_UNICORE32
1148
1149 void cpu_loop(CPUUniCore32State *env)
1150 {
1151 CPUState *cs = CPU(uc32_env_get_cpu(env));
1152 int trapnr;
1153 unsigned int n, insn;
1154 target_siginfo_t info;
1155
1156 for (;;) {
1157 cpu_exec_start(cs);
1158 trapnr = uc32_cpu_exec(cs);
1159 cpu_exec_end(cs);
1160 switch (trapnr) {
1161 case UC32_EXCP_PRIV:
1162 {
1163 /* system call */
1164 get_user_u32(insn, env->regs[31] - 4);
1165 n = insn & 0xffffff;
1166
1167 if (n >= UC32_SYSCALL_BASE) {
1168 /* linux syscall */
1169 n -= UC32_SYSCALL_BASE;
1170 if (n == UC32_SYSCALL_NR_set_tls) {
1171 cpu_set_tls(env, env->regs[0]);
1172 env->regs[0] = 0;
1173 } else {
1174 abi_long ret = do_syscall(env,
1175 n,
1176 env->regs[0],
1177 env->regs[1],
1178 env->regs[2],
1179 env->regs[3],
1180 env->regs[4],
1181 env->regs[5],
1182 0, 0);
1183 if (ret == -TARGET_ERESTARTSYS) {
1184 env->regs[31] -= 4;
1185 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
1186 env->regs[0] = ret;
1187 }
1188 }
1189 } else {
1190 goto error;
1191 }
1192 }
1193 break;
1194 case UC32_EXCP_DTRAP:
1195 case UC32_EXCP_ITRAP:
1196 info.si_signo = TARGET_SIGSEGV;
1197 info.si_errno = 0;
1198 /* XXX: check env->error_code */
1199 info.si_code = TARGET_SEGV_MAPERR;
1200 info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
1201 queue_signal(env, info.si_signo, &info);
1202 break;
1203 case EXCP_INTERRUPT:
1204 /* just indicate that signals should be handled asap */
1205 break;
1206 case EXCP_DEBUG:
1207 {
1208 int sig;
1209
1210 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1211 if (sig) {
1212 info.si_signo = sig;
1213 info.si_errno = 0;
1214 info.si_code = TARGET_TRAP_BRKPT;
1215 queue_signal(env, info.si_signo, &info);
1216 }
1217 }
1218 break;
1219 default:
1220 goto error;
1221 }
1222 process_pending_signals(env);
1223 }
1224
1225 error:
1226 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
1227 abort();
1228 }
1229 #endif
1230
1231 #ifdef TARGET_SPARC
1232 #define SPARC64_STACK_BIAS 2047
1233
1234 //#define DEBUG_WIN
1235
1236 /* WARNING: dealing with register windows _is_ complicated. More info
1237 can be found at http://www.sics.se/~psm/sparcstack.html */
1238 static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
1239 {
1240 index = (index + cwp * 16) % (16 * env->nwindows);
1241 /* wrap handling: if cwp is on the last window, then we use the
1242 registers 'after' the end */
1243 if (index < 8 && env->cwp == env->nwindows - 1)
1244 index += 16 * env->nwindows;
1245 return index;
1246 }
1247
1248 /* save the register window 'cwp1' */
1249 static inline void save_window_offset(CPUSPARCState *env, int cwp1)
1250 {
1251 unsigned int i;
1252 abi_ulong sp_ptr;
1253
1254 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
1255 #ifdef TARGET_SPARC64
1256 if (sp_ptr & 3)
1257 sp_ptr += SPARC64_STACK_BIAS;
1258 #endif
1259 #if defined(DEBUG_WIN)
1260 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
1261 sp_ptr, cwp1);
1262 #endif
1263 for(i = 0; i < 16; i++) {
1264 /* FIXME - what to do if put_user() fails? */
1265 put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
1266 sp_ptr += sizeof(abi_ulong);
1267 }
1268 }
1269
1270 static void save_window(CPUSPARCState *env)
1271 {
1272 #ifndef TARGET_SPARC64
1273 unsigned int new_wim;
1274 new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
1275 ((1LL << env->nwindows) - 1);
1276 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
1277 env->wim = new_wim;
1278 #else
1279 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
1280 env->cansave++;
1281 env->canrestore--;
1282 #endif
1283 }
1284
1285 static void restore_window(CPUSPARCState *env)
1286 {
1287 #ifndef TARGET_SPARC64
1288 unsigned int new_wim;
1289 #endif
1290 unsigned int i, cwp1;
1291 abi_ulong sp_ptr;
1292
1293 #ifndef TARGET_SPARC64
1294 new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
1295 ((1LL << env->nwindows) - 1);
1296 #endif
1297
1298 /* restore the invalid window */
1299 cwp1 = cpu_cwp_inc(env, env->cwp + 1);
1300 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
1301 #ifdef TARGET_SPARC64
1302 if (sp_ptr & 3)
1303 sp_ptr += SPARC64_STACK_BIAS;
1304 #endif
1305 #if defined(DEBUG_WIN)
1306 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
1307 sp_ptr, cwp1);
1308 #endif
1309 for(i = 0; i < 16; i++) {
1310 /* FIXME - what to do if get_user() fails? */
1311 get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
1312 sp_ptr += sizeof(abi_ulong);
1313 }
1314 #ifdef TARGET_SPARC64
1315 env->canrestore++;
1316 if (env->cleanwin < env->nwindows - 1)
1317 env->cleanwin++;
1318 env->cansave--;
1319 #else
1320 env->wim = new_wim;
1321 #endif
1322 }
1323
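/*
 * Spill every register window that is still live to the guest stack.  This
 * services the flush-windows software trap (0x83, plus 0x103 for the 32-bit
 * ABI on sparc64) handled in cpu_loop() below, so that code inspecting the
 * stack sees up-to-date window contents.
 */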
1324 static void flush_windows(CPUSPARCState *env)
1325 {
1326 int offset, cwp1;
1327
1328 offset = 1;
1329 for(;;) {
1330 /* if restore would invoke restore_window(), then we can stop */
1331 cwp1 = cpu_cwp_inc(env, env->cwp + offset);
1332 #ifndef TARGET_SPARC64
1333 if (env->wim & (1 << cwp1))
1334 break;
1335 #else
1336 if (env->canrestore == 0)
1337 break;
1338 env->cansave++;
1339 env->canrestore--;
1340 #endif
1341 save_window_offset(env, cwp1);
1342 offset++;
1343 }
1344 cwp1 = cpu_cwp_inc(env, env->cwp + 1);
1345 #ifndef TARGET_SPARC64
1346 /* set wim so that restore will reload the registers */
1347 env->wim = 1 << cwp1;
1348 #endif
1349 #if defined(DEBUG_WIN)
1350 printf("flush_windows: nb=%d\n", offset - 1);
1351 #endif
1352 }
1353
1354 void cpu_loop (CPUSPARCState *env)
1355 {
1356 CPUState *cs = CPU(sparc_env_get_cpu(env));
1357 int trapnr;
1358 abi_long ret;
1359 target_siginfo_t info;
1360
1361 while (1) {
1362 cpu_exec_start(cs);
1363 trapnr = cpu_sparc_exec(cs);
1364 cpu_exec_end(cs);
1365
1366 /* Compute PSR before exposing state. */
1367 if (env->cc_op != CC_OP_FLAGS) {
1368 cpu_get_psr(env);
1369 }
1370
1371 switch (trapnr) {
1372 #ifndef TARGET_SPARC64
1373 case 0x88:
1374 case 0x90:
1375 #else
1376 case 0x110:
1377 case 0x16d:
1378 #endif
1379 ret = do_syscall (env, env->gregs[1],
1380 env->regwptr[0], env->regwptr[1],
1381 env->regwptr[2], env->regwptr[3],
1382 env->regwptr[4], env->regwptr[5],
1383 0, 0);
1384 if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
1385 break;
1386 }
1387 if ((abi_ulong)ret >= (abi_ulong)(-515)) {
1388 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1389 env->xcc |= PSR_CARRY;
1390 #else
1391 env->psr |= PSR_CARRY;
1392 #endif
1393 ret = -ret;
1394 } else {
1395 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1396 env->xcc &= ~PSR_CARRY;
1397 #else
1398 env->psr &= ~PSR_CARRY;
1399 #endif
1400 }
1401 env->regwptr[0] = ret;
1402 /* next instruction */
1403 env->pc = env->npc;
1404 env->npc = env->npc + 4;
1405 break;
1406 case 0x83: /* flush windows */
1407 #ifdef TARGET_ABI32
1408 case 0x103:
1409 #endif
1410 flush_windows(env);
1411 /* next instruction */
1412 env->pc = env->npc;
1413 env->npc = env->npc + 4;
1414 break;
1415 #ifndef TARGET_SPARC64
1416 case TT_WIN_OVF: /* window overflow */
1417 save_window(env);
1418 break;
1419 case TT_WIN_UNF: /* window underflow */
1420 restore_window(env);
1421 break;
1422 case TT_TFAULT:
1423 case TT_DFAULT:
1424 {
1425 info.si_signo = TARGET_SIGSEGV;
1426 info.si_errno = 0;
1427 /* XXX: check env->error_code */
1428 info.si_code = TARGET_SEGV_MAPERR;
1429 info._sifields._sigfault._addr = env->mmuregs[4];
1430 queue_signal(env, info.si_signo, &info);
1431 }
1432 break;
1433 #else
1434 case TT_SPILL: /* window overflow */
1435 save_window(env);
1436 break;
1437 case TT_FILL: /* window underflow */
1438 restore_window(env);
1439 break;
1440 case TT_TFAULT:
1441 case TT_DFAULT:
1442 {
1443 info.si_signo = TARGET_SIGSEGV;
1444 info.si_errno = 0;
1445 /* XXX: check env->error_code */
1446 info.si_code = TARGET_SEGV_MAPERR;
1447 if (trapnr == TT_DFAULT)
1448 info._sifields._sigfault._addr = env->dmmuregs[4];
1449 else
1450 info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
1451 queue_signal(env, info.si_signo, &info);
1452 }
1453 break;
1454 #ifndef TARGET_ABI32
1455 case 0x16e:
1456 flush_windows(env);
1457 sparc64_get_context(env);
1458 break;
1459 case 0x16f:
1460 flush_windows(env);
1461 sparc64_set_context(env);
1462 break;
1463 #endif
1464 #endif
1465 case EXCP_INTERRUPT:
1466 /* just indicate that signals should be handled asap */
1467 break;
1468 case TT_ILL_INSN:
1469 {
1470 info.si_signo = TARGET_SIGILL;
1471 info.si_errno = 0;
1472 info.si_code = TARGET_ILL_ILLOPC;
1473 info._sifields._sigfault._addr = env->pc;
1474 queue_signal(env, info.si_signo, &info);
1475 }
1476 break;
1477 case EXCP_DEBUG:
1478 {
1479 int sig;
1480
1481 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1482 if (sig)
1483 {
1484 info.si_signo = sig;
1485 info.si_errno = 0;
1486 info.si_code = TARGET_TRAP_BRKPT;
1487 queue_signal(env, info.si_signo, &info);
1488 }
1489 }
1490 break;
1491 default:
1492 printf ("Unhandled trap: 0x%x\n", trapnr);
1493 cpu_dump_state(cs, stderr, fprintf, 0);
1494 exit(EXIT_FAILURE);
1495 }
1496 process_pending_signals (env);
1497 }
1498 }
1499
1500 #endif
1501
1502 #ifdef TARGET_PPC
1503 static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
1504 {
1505 return cpu_get_host_ticks();
1506 }
1507
1508 uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
1509 {
1510 return cpu_ppc_get_tb(env);
1511 }
1512
1513 uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
1514 {
1515 return cpu_ppc_get_tb(env) >> 32;
1516 }
1517
1518 uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
1519 {
1520 return cpu_ppc_get_tb(env);
1521 }
1522
1523 uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
1524 {
1525 return cpu_ppc_get_tb(env) >> 32;
1526 }
1527
1528 uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
1529 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1530
1531 uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
1532 {
1533 return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
1534 }
1535
1536 /* XXX: to be fixed */
1537 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1538 {
1539 return -1;
1540 }
1541
1542 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1543 {
1544 return -1;
1545 }
1546
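/*
 * Invoked from the POWERPC_EXCP_STCX case in cpu_loop() below to finish a
 * store-conditional outside of translated code.  env->reserve_ea is the
 * address of the conditional store, env->reserve_addr the reservation
 * established by the matching load-and-reserve, and env->reserve_info packs
 * the operands: the low 5 bits give the source register and the remaining
 * bits the access size in bytes (1, 2, 4, 8 or 16).
 */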
1547 static int do_store_exclusive(CPUPPCState *env)
1548 {
1549 target_ulong addr;
1550 target_ulong page_addr;
1551 target_ulong val, val2 __attribute__((unused)) = 0;
1552 int flags;
1553 int segv = 0;
1554
1555 addr = env->reserve_ea;
1556 page_addr = addr & TARGET_PAGE_MASK;
1557 start_exclusive();
1558 mmap_lock();
1559 flags = page_get_flags(page_addr);
1560 if ((flags & PAGE_READ) == 0) {
1561 segv = 1;
1562 } else {
1563 int reg = env->reserve_info & 0x1f;
1564 int size = env->reserve_info >> 5;
1565 int stored = 0;
1566
1567 if (addr == env->reserve_addr) {
1568 switch (size) {
1569 case 1: segv = get_user_u8(val, addr); break;
1570 case 2: segv = get_user_u16(val, addr); break;
1571 case 4: segv = get_user_u32(val, addr); break;
1572 #if defined(TARGET_PPC64)
1573 case 8: segv = get_user_u64(val, addr); break;
1574 case 16: {
1575 segv = get_user_u64(val, addr);
1576 if (!segv) {
1577 segv = get_user_u64(val2, addr + 8);
1578 }
1579 break;
1580 }
1581 #endif
1582 default: abort();
1583 }
1584 if (!segv && val == env->reserve_val) {
1585 val = env->gpr[reg];
1586 switch (size) {
1587 case 1: segv = put_user_u8(val, addr); break;
1588 case 2: segv = put_user_u16(val, addr); break;
1589 case 4: segv = put_user_u32(val, addr); break;
1590 #if defined(TARGET_PPC64)
1591 case 8: segv = put_user_u64(val, addr); break;
1592 case 16: {
1593 if (val2 == env->reserve_val2) {
1594 if (msr_le) {
1595 val2 = val;
1596 val = env->gpr[reg+1];
1597 } else {
1598 val2 = env->gpr[reg+1];
1599 }
1600 segv = put_user_u64(val, addr);
1601 if (!segv) {
1602 segv = put_user_u64(val2, addr + 8);
1603 }
1604 }
1605 break;
1606 }
1607 #endif
1608 default: abort();
1609 }
1610 if (!segv) {
1611 stored = 1;
1612 }
1613 }
1614 }
1615 env->crf[0] = (stored << 1) | xer_so;
1616 env->reserve_addr = (target_ulong)-1;
1617 }
1618 if (!segv) {
1619 env->nip += 4;
1620 }
1621 mmap_unlock();
1622 end_exclusive();
1623 return segv;
1624 }
1625
1626 void cpu_loop(CPUPPCState *env)
1627 {
1628 CPUState *cs = CPU(ppc_env_get_cpu(env));
1629 target_siginfo_t info;
1630 int trapnr;
1631 target_ulong ret;
1632
1633 for(;;) {
1634 cpu_exec_start(cs);
1635 trapnr = cpu_ppc_exec(cs);
1636 cpu_exec_end(cs);
1637 switch(trapnr) {
1638 case POWERPC_EXCP_NONE:
1639 /* Just go on */
1640 break;
1641 case POWERPC_EXCP_CRITICAL: /* Critical input */
1642 cpu_abort(cs, "Critical interrupt while in user mode. "
1643 "Aborting\n");
1644 break;
1645 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1646 cpu_abort(cs, "Machine check exception while in user mode. "
1647 "Aborting\n");
1648 break;
1649 case POWERPC_EXCP_DSI: /* Data storage exception */
1650 EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
1651 env->spr[SPR_DAR]);
1652 /* XXX: check this. Seems bugged */
1653 switch (env->error_code & 0xFF000000) {
1654 case 0x40000000:
1655 info.si_signo = TARGET_SIGSEGV;
1656 info.si_errno = 0;
1657 info.si_code = TARGET_SEGV_MAPERR;
1658 break;
1659 case 0x04000000:
1660 info.si_signo = TARGET_SIGILL;
1661 info.si_errno = 0;
1662 info.si_code = TARGET_ILL_ILLADR;
1663 break;
1664 case 0x08000000:
1665 info.si_signo = TARGET_SIGSEGV;
1666 info.si_errno = 0;
1667 info.si_code = TARGET_SEGV_ACCERR;
1668 break;
1669 default:
1670 /* Let's send a regular segfault... */
1671 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
1672 env->error_code);
1673 info.si_signo = TARGET_SIGSEGV;
1674 info.si_errno = 0;
1675 info.si_code = TARGET_SEGV_MAPERR;
1676 break;
1677 }
1678 info._sifields._sigfault._addr = env->nip;
1679 queue_signal(env, info.si_signo, &info);
1680 break;
1681 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1682 EXCP_DUMP(env, "Invalid instruction fetch: 0x" TARGET_FMT_lx
1683 "\n", env->spr[SPR_SRR0]);
1684 /* XXX: check this */
1685 switch (env->error_code & 0xFF000000) {
1686 case 0x40000000:
1687 info.si_signo = TARGET_SIGSEGV;
1688 info.si_errno = 0;
1689 info.si_code = TARGET_SEGV_MAPERR;
1690 break;
1691 case 0x10000000:
1692 case 0x08000000:
1693 info.si_signo = TARGET_SIGSEGV;
1694 info.si_errno = 0;
1695 info.si_code = TARGET_SEGV_ACCERR;
1696 break;
1697 default:
1698 /* Let's send a regular segfault... */
1699 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
1700 env->error_code);
1701 info.si_signo = TARGET_SIGSEGV;
1702 info.si_errno = 0;
1703 info.si_code = TARGET_SEGV_MAPERR;
1704 break;
1705 }
1706 info._sifields._sigfault._addr = env->nip - 4;
1707 queue_signal(env, info.si_signo, &info);
1708 break;
1709 case POWERPC_EXCP_EXTERNAL: /* External input */
1710 cpu_abort(cs, "External interrupt while in user mode. "
1711 "Aborting\n");
1712 break;
1713 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1714 EXCP_DUMP(env, "Unaligned memory access\n");
1715 /* XXX: check this */
1716 info.si_signo = TARGET_SIGBUS;
1717 info.si_errno = 0;
1718 info.si_code = TARGET_BUS_ADRALN;
1719 info._sifields._sigfault._addr = env->nip;
1720 queue_signal(env, info.si_signo, &info);
1721 break;
1722 case POWERPC_EXCP_PROGRAM: /* Program exception */
1723 /* XXX: check this */
1724 switch (env->error_code & ~0xF) {
1725 case POWERPC_EXCP_FP:
1726 EXCP_DUMP(env, "Floating point program exception\n");
1727 info.si_signo = TARGET_SIGFPE;
1728 info.si_errno = 0;
1729 switch (env->error_code & 0xF) {
1730 case POWERPC_EXCP_FP_OX:
1731 info.si_code = TARGET_FPE_FLTOVF;
1732 break;
1733 case POWERPC_EXCP_FP_UX:
1734 info.si_code = TARGET_FPE_FLTUND;
1735 break;
1736 case POWERPC_EXCP_FP_ZX:
1737 case POWERPC_EXCP_FP_VXZDZ:
1738 info.si_code = TARGET_FPE_FLTDIV;
1739 break;
1740 case POWERPC_EXCP_FP_XX:
1741 info.si_code = TARGET_FPE_FLTRES;
1742 break;
1743 case POWERPC_EXCP_FP_VXSOFT:
1744 info.si_code = TARGET_FPE_FLTINV;
1745 break;
1746 case POWERPC_EXCP_FP_VXSNAN:
1747 case POWERPC_EXCP_FP_VXISI:
1748 case POWERPC_EXCP_FP_VXIDI:
1749 case POWERPC_EXCP_FP_VXIMZ:
1750 case POWERPC_EXCP_FP_VXVC:
1751 case POWERPC_EXCP_FP_VXSQRT:
1752 case POWERPC_EXCP_FP_VXCVI:
1753 info.si_code = TARGET_FPE_FLTSUB;
1754 break;
1755 default:
1756 EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
1757 env->error_code);
1758 break;
1759 }
1760 break;
1761 case POWERPC_EXCP_INVAL:
1762 EXCP_DUMP(env, "Invalid instruction\n");
1763 info.si_signo = TARGET_SIGILL;
1764 info.si_errno = 0;
1765 switch (env->error_code & 0xF) {
1766 case POWERPC_EXCP_INVAL_INVAL:
1767 info.si_code = TARGET_ILL_ILLOPC;
1768 break;
1769 case POWERPC_EXCP_INVAL_LSWX:
1770 info.si_code = TARGET_ILL_ILLOPN;
1771 break;
1772 case POWERPC_EXCP_INVAL_SPR:
1773 info.si_code = TARGET_ILL_PRVREG;
1774 break;
1775 case POWERPC_EXCP_INVAL_FP:
1776 info.si_code = TARGET_ILL_COPROC;
1777 break;
1778 default:
1779 EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
1780 env->error_code & 0xF);
1781 info.si_code = TARGET_ILL_ILLADR;
1782 break;
1783 }
1784 break;
1785 case POWERPC_EXCP_PRIV:
1786 EXCP_DUMP(env, "Privilege violation\n");
1787 info.si_signo = TARGET_SIGILL;
1788 info.si_errno = 0;
1789 switch (env->error_code & 0xF) {
1790 case POWERPC_EXCP_PRIV_OPC:
1791 info.si_code = TARGET_ILL_PRVOPC;
1792 break;
1793 case POWERPC_EXCP_PRIV_REG:
1794 info.si_code = TARGET_ILL_PRVREG;
1795 break;
1796 default:
1797 EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
1798 env->error_code & 0xF);
1799 info.si_code = TARGET_ILL_PRVOPC;
1800 break;
1801 }
1802 break;
1803 case POWERPC_EXCP_TRAP:
1804 cpu_abort(cs, "Tried to call a TRAP\n");
1805 break;
1806 default:
1807 /* Should not happen! */
1808 cpu_abort(cs, "Unknown program exception (%02x)\n",
1809 env->error_code);
1810 break;
1811 }
1812 info._sifields._sigfault._addr = env->nip - 4;
1813 queue_signal(env, info.si_signo, &info);
1814 break;
1815 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1816 EXCP_DUMP(env, "No floating point allowed\n");
1817 info.si_signo = TARGET_SIGILL;
1818 info.si_errno = 0;
1819 info.si_code = TARGET_ILL_COPROC;
1820 info._sifields._sigfault._addr = env->nip - 4;
1821 queue_signal(env, info.si_signo, &info);
1822 break;
1823 case POWERPC_EXCP_SYSCALL: /* System call exception */
1824 cpu_abort(cs, "Syscall exception while in user mode. "
1825 "Aborting\n");
1826 break;
1827 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
1828 EXCP_DUMP(env, "No APU instruction allowed\n");
1829 info.si_signo = TARGET_SIGILL;
1830 info.si_errno = 0;
1831 info.si_code = TARGET_ILL_COPROC;
1832 info._sifields._sigfault._addr = env->nip - 4;
1833 queue_signal(env, info.si_signo, &info);
1834 break;
1835 case POWERPC_EXCP_DECR: /* Decrementer exception */
1836 cpu_abort(cs, "Decrementer interrupt while in user mode. "
1837 "Aborting\n");
1838 break;
1839 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
1840 cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
1841 "Aborting\n");
1842 break;
1843 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
1844 cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
1845 "Aborting\n");
1846 break;
1847 case POWERPC_EXCP_DTLB: /* Data TLB error */
1848 cpu_abort(cs, "Data TLB exception while in user mode. "
1849 "Aborting\n");
1850 break;
1851 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
1852 cpu_abort(cs, "Instruction TLB exception while in user mode. "
1853 "Aborting\n");
1854 break;
1855 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
1856 EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
1857 info.si_signo = TARGET_SIGILL;
1858 info.si_errno = 0;
1859 info.si_code = TARGET_ILL_COPROC;
1860 info._sifields._sigfault._addr = env->nip - 4;
1861 queue_signal(env, info.si_signo, &info);
1862 break;
1863 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
1864 cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
1865 break;
1866 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
1867 cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
1868 break;
1869 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
1870 cpu_abort(cs, "Performance monitor exception not handled\n");
1871 break;
1872 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
1873 cpu_abort(cs, "Doorbell interrupt while in user mode. "
1874 "Aborting\n");
1875 break;
1876 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
1877 cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
1878 "Aborting\n");
1879 break;
1880 case POWERPC_EXCP_RESET: /* System reset exception */
1881 cpu_abort(cs, "Reset interrupt while in user mode. "
1882 "Aborting\n");
1883 break;
1884 case POWERPC_EXCP_DSEG: /* Data segment exception */
1885 cpu_abort(cs, "Data segment exception while in user mode. "
1886 "Aborting\n");
1887 break;
1888 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
1889 cpu_abort(cs, "Instruction segment exception "
1890 "while in user mode. Aborting\n");
1891 break;
1892 /* PowerPC 64 with hypervisor mode support */
1893 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
1894 cpu_abort(cs, "Hypervisor decrementer interrupt "
1895 "while in user mode. Aborting\n");
1896 break;
1897 case POWERPC_EXCP_TRACE: /* Trace exception */
1898 /* Nothing to do:
1899 * we use this exception to emulate step-by-step execution mode.
1900 */
1901 break;
1902 /* PowerPC 64 with hypervisor mode support */
1903 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
1904 cpu_abort(cs, "Hypervisor data storage exception "
1905 "while in user mode. Aborting\n");
1906 break;
1907 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
1908 cpu_abort(cs, "Hypervisor instruction storage exception "
1909 "while in user mode. Aborting\n");
1910 break;
1911 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
1912 cpu_abort(cs, "Hypervisor data segment exception "
1913 "while in user mode. Aborting\n");
1914 break;
1915 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
1916 cpu_abort(cs, "Hypervisor instruction segment exception "
1917 "while in user mode. Aborting\n");
1918 break;
1919 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1920 EXCP_DUMP(env, "No Altivec instructions allowed\n");
1921 info.si_signo = TARGET_SIGILL;
1922 info.si_errno = 0;
1923 info.si_code = TARGET_ILL_COPROC;
1924 info._sifields._sigfault._addr = env->nip - 4;
1925 queue_signal(env, info.si_signo, &info);
1926 break;
1927 case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
1928 cpu_abort(cs, "Programmable interval timer interrupt "
1929 "while in user mode. Aborting\n");
1930 break;
1931 case POWERPC_EXCP_IO: /* IO error exception */
1932 cpu_abort(cs, "IO error exception while in user mode. "
1933 "Aborting\n");
1934 break;
1935 case POWERPC_EXCP_RUNM: /* Run mode exception */
1936 cpu_abort(cs, "Run mode exception while in user mode. "
1937 "Aborting\n");
1938 break;
1939 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
1940 cpu_abort(cs, "Emulation trap exception not handled\n");
1941 break;
1942 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
1943 cpu_abort(cs, "Instruction fetch TLB exception "
1944 "while in user-mode. Aborting");
1945 break;
1946 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
1947 cpu_abort(cs, "Data load TLB exception while in user-mode. "
1948 "Aborting");
1949 break;
1950 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
1951 cpu_abort(cs, "Data store TLB exception while in user-mode. "
1952 "Aborting");
1953 break;
1954 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
1955 cpu_abort(cs, "Floating-point assist exception not handled\n");
1956 break;
1957 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
1958 cpu_abort(cs, "Instruction address breakpoint exception "
1959 "not handled\n");
1960 break;
1961 case POWERPC_EXCP_SMI: /* System management interrupt */
1962 cpu_abort(cs, "System management interrupt while in user mode. "
1963 "Aborting\n");
1964 break;
1965 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1966 cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
1967 "Aborting\n");
1968 break;
1969 case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
1970 cpu_abort(cs, "Performance monitor exception not handled\n");
1971 break;
1972 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1973 cpu_abort(cs, "Vector assist exception not handled\n");
1974 break;
1975 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
1976 cpu_abort(cs, "Soft patch exception not handled\n");
1977 break;
1978 case POWERPC_EXCP_MAINT: /* Maintenance exception */
1979 cpu_abort(cs, "Maintenance exception while in user mode. "
1980 "Aborting\n");
1981 break;
1982 case POWERPC_EXCP_STOP: /* stop translation */
1983 /* We did invalidate the instruction cache. Go on */
1984 break;
1985 case POWERPC_EXCP_BRANCH: /* branch instruction: */
1986 /* We just stopped because of a branch. Go on */
1987 break;
1988 case POWERPC_EXCP_SYSCALL_USER:
1989 /* system call in user-mode emulation */
1990 /* WARNING:
1991 * PPC ABI uses overflow flag in cr0 to signal an error
1992 * in syscalls.
1993 */
1994 env->crf[0] &= ~0x1;
1995 ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
1996 env->gpr[5], env->gpr[6], env->gpr[7],
1997 env->gpr[8], 0, 0);
1998 if (ret == -TARGET_ERESTARTSYS) {
1999 env->nip -= 4;
2000 break;
2001 }
2002 if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
2003 /* Returning from a successful sigreturn syscall.
2004 Avoid corrupting register state. */
2005 break;
2006 }
2007 if (ret > (target_ulong)(-515)) {
2008 env->crf[0] |= 0x1;
2009 ret = -ret;
2010 }
2011 env->gpr[3] = ret;
2012 break;
2013 case POWERPC_EXCP_STCX:
2014 if (do_store_exclusive(env)) {
2015 info.si_signo = TARGET_SIGSEGV;
2016 info.si_errno = 0;
2017 info.si_code = TARGET_SEGV_MAPERR;
2018 info._sifields._sigfault._addr = env->nip;
2019 queue_signal(env, info.si_signo, &info);
2020 }
2021 break;
2022 case EXCP_DEBUG:
2023 {
2024 int sig;
2025
2026 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2027 if (sig) {
2028 info.si_signo = sig;
2029 info.si_errno = 0;
2030 info.si_code = TARGET_TRAP_BRKPT;
2031 queue_signal(env, info.si_signo, &info);
2032 }
2033 }
2034 break;
2035 case EXCP_INTERRUPT:
2036 /* just indicate that signals should be handled asap */
2037 break;
2038 default:
2039 cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr);
2040 break;
2041 }
2042 process_pending_signals(env);
2043 }
2044 }
2045 #endif
2046
2047 #ifdef TARGET_MIPS
2048
2049 # ifdef TARGET_ABI_MIPSO32
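/*
 * For the o32 ABI the table below records how many arguments each syscall
 * takes, indexed from syscall number 4000 (sys_syscall).  The count is
 * later used to decide how many arguments have to be fetched from the
 * guest stack, since o32 passes only the first four in registers.
 */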
2050 # define MIPS_SYS(name, args) args,
2051 static const uint8_t mips_syscall_args[] = {
2052 MIPS_SYS(sys_syscall , 8) /* 4000 */
2053 MIPS_SYS(sys_exit , 1)
2054 MIPS_SYS(sys_fork , 0)
2055 MIPS_SYS(sys_read , 3)
2056 MIPS_SYS(sys_write , 3)
2057 MIPS_SYS(sys_open , 3) /* 4005 */
2058 MIPS_SYS(sys_close , 1)
2059 MIPS_SYS(sys_waitpid , 3)
2060 MIPS_SYS(sys_creat , 2)
2061 MIPS_SYS(sys_link , 2)
2062 MIPS_SYS(sys_unlink , 1) /* 4010 */
2063 MIPS_SYS(sys_execve , 0)
2064 MIPS_SYS(sys_chdir , 1)
2065 MIPS_SYS(sys_time , 1)
2066 MIPS_SYS(sys_mknod , 3)
2067 MIPS_SYS(sys_chmod , 2) /* 4015 */
2068 MIPS_SYS(sys_lchown , 3)
2069 MIPS_SYS(sys_ni_syscall , 0)
2070 MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */
2071 MIPS_SYS(sys_lseek , 3)
2072 MIPS_SYS(sys_getpid , 0) /* 4020 */
2073 MIPS_SYS(sys_mount , 5)
2074 MIPS_SYS(sys_umount , 1)
2075 MIPS_SYS(sys_setuid , 1)
2076 MIPS_SYS(sys_getuid , 0)
2077 MIPS_SYS(sys_stime , 1) /* 4025 */
2078 MIPS_SYS(sys_ptrace , 4)
2079 MIPS_SYS(sys_alarm , 1)
2080 MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */
2081 MIPS_SYS(sys_pause , 0)
2082 MIPS_SYS(sys_utime , 2) /* 4030 */
2083 MIPS_SYS(sys_ni_syscall , 0)
2084 MIPS_SYS(sys_ni_syscall , 0)
2085 MIPS_SYS(sys_access , 2)
2086 MIPS_SYS(sys_nice , 1)
2087 MIPS_SYS(sys_ni_syscall , 0) /* 4035 */
2088 MIPS_SYS(sys_sync , 0)
2089 MIPS_SYS(sys_kill , 2)
2090 MIPS_SYS(sys_rename , 2)
2091 MIPS_SYS(sys_mkdir , 2)
2092 MIPS_SYS(sys_rmdir , 1) /* 4040 */
2093 MIPS_SYS(sys_dup , 1)
2094 MIPS_SYS(sys_pipe , 0)
2095 MIPS_SYS(sys_times , 1)
2096 MIPS_SYS(sys_ni_syscall , 0)
2097 MIPS_SYS(sys_brk , 1) /* 4045 */
2098 MIPS_SYS(sys_setgid , 1)
2099 MIPS_SYS(sys_getgid , 0)
2100 MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */
2101 MIPS_SYS(sys_geteuid , 0)
2102 MIPS_SYS(sys_getegid , 0) /* 4050 */
2103 MIPS_SYS(sys_acct , 0)
2104 MIPS_SYS(sys_umount2 , 2)
2105 MIPS_SYS(sys_ni_syscall , 0)
2106 MIPS_SYS(sys_ioctl , 3)
2107 MIPS_SYS(sys_fcntl , 3) /* 4055 */
2108 MIPS_SYS(sys_ni_syscall , 2)
2109 MIPS_SYS(sys_setpgid , 2)
2110 MIPS_SYS(sys_ni_syscall , 0)
2111 MIPS_SYS(sys_olduname , 1)
2112 MIPS_SYS(sys_umask , 1) /* 4060 */
2113 MIPS_SYS(sys_chroot , 1)
2114 MIPS_SYS(sys_ustat , 2)
2115 MIPS_SYS(sys_dup2 , 2)
2116 MIPS_SYS(sys_getppid , 0)
2117 MIPS_SYS(sys_getpgrp , 0) /* 4065 */
2118 MIPS_SYS(sys_setsid , 0)
2119 MIPS_SYS(sys_sigaction , 3)
2120 MIPS_SYS(sys_sgetmask , 0)
2121 MIPS_SYS(sys_ssetmask , 1)
2122 MIPS_SYS(sys_setreuid , 2) /* 4070 */
2123 MIPS_SYS(sys_setregid , 2)
2124 MIPS_SYS(sys_sigsuspend , 0)
2125 MIPS_SYS(sys_sigpending , 1)
2126 MIPS_SYS(sys_sethostname , 2)
2127 MIPS_SYS(sys_setrlimit , 2) /* 4075 */
2128 MIPS_SYS(sys_getrlimit , 2)
2129 MIPS_SYS(sys_getrusage , 2)
2130 MIPS_SYS(sys_gettimeofday, 2)
2131 MIPS_SYS(sys_settimeofday, 2)
2132 MIPS_SYS(sys_getgroups , 2) /* 4080 */
2133 MIPS_SYS(sys_setgroups , 2)
2134 MIPS_SYS(sys_ni_syscall , 0) /* old_select */
2135 MIPS_SYS(sys_symlink , 2)
2136 MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */
2137 MIPS_SYS(sys_readlink , 3) /* 4085 */
2138 MIPS_SYS(sys_uselib , 1)
2139 MIPS_SYS(sys_swapon , 2)
2140 MIPS_SYS(sys_reboot , 3)
2141 MIPS_SYS(old_readdir , 3)
2142 MIPS_SYS(old_mmap , 6) /* 4090 */
2143 MIPS_SYS(sys_munmap , 2)
2144 MIPS_SYS(sys_truncate , 2)
2145 MIPS_SYS(sys_ftruncate , 2)
2146 MIPS_SYS(sys_fchmod , 2)
2147 MIPS_SYS(sys_fchown , 3) /* 4095 */
2148 MIPS_SYS(sys_getpriority , 2)
2149 MIPS_SYS(sys_setpriority , 3)
2150 MIPS_SYS(sys_ni_syscall , 0)
2151 MIPS_SYS(sys_statfs , 2)
2152 MIPS_SYS(sys_fstatfs , 2) /* 4100 */
2153 MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */
2154 MIPS_SYS(sys_socketcall , 2)
2155 MIPS_SYS(sys_syslog , 3)
2156 MIPS_SYS(sys_setitimer , 3)
2157 MIPS_SYS(sys_getitimer , 2) /* 4105 */
2158 MIPS_SYS(sys_newstat , 2)
2159 MIPS_SYS(sys_newlstat , 2)
2160 MIPS_SYS(sys_newfstat , 2)
2161 MIPS_SYS(sys_uname , 1)
2162 MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */
2163 MIPS_SYS(sys_vhangup , 0)
2164 MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */
2165 MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */
2166 MIPS_SYS(sys_wait4 , 4)
2167 MIPS_SYS(sys_swapoff , 1) /* 4115 */
2168 MIPS_SYS(sys_sysinfo , 1)
2169 MIPS_SYS(sys_ipc , 6)
2170 MIPS_SYS(sys_fsync , 1)
2171 MIPS_SYS(sys_sigreturn , 0)
2172 MIPS_SYS(sys_clone , 6) /* 4120 */
2173 MIPS_SYS(sys_setdomainname, 2)
2174 MIPS_SYS(sys_newuname , 1)
2175 MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */
2176 MIPS_SYS(sys_adjtimex , 1)
2177 MIPS_SYS(sys_mprotect , 3) /* 4125 */
2178 MIPS_SYS(sys_sigprocmask , 3)
2179 MIPS_SYS(sys_ni_syscall , 0) /* was create_module */
2180 MIPS_SYS(sys_init_module , 5)
2181 MIPS_SYS(sys_delete_module, 1)
2182 MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */
2183 MIPS_SYS(sys_quotactl , 0)
2184 MIPS_SYS(sys_getpgid , 1)
2185 MIPS_SYS(sys_fchdir , 1)
2186 MIPS_SYS(sys_bdflush , 2)
2187 MIPS_SYS(sys_sysfs , 3) /* 4135 */
2188 MIPS_SYS(sys_personality , 1)
2189 MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */
2190 MIPS_SYS(sys_setfsuid , 1)
2191 MIPS_SYS(sys_setfsgid , 1)
2192 MIPS_SYS(sys_llseek , 5) /* 4140 */
2193 MIPS_SYS(sys_getdents , 3)
2194 MIPS_SYS(sys_select , 5)
2195 MIPS_SYS(sys_flock , 2)
2196 MIPS_SYS(sys_msync , 3)
2197 MIPS_SYS(sys_readv , 3) /* 4145 */
2198 MIPS_SYS(sys_writev , 3)
2199 MIPS_SYS(sys_cacheflush , 3)
2200 MIPS_SYS(sys_cachectl , 3)
2201 MIPS_SYS(sys_sysmips , 4)
2202 MIPS_SYS(sys_ni_syscall , 0) /* 4150 */
2203 MIPS_SYS(sys_getsid , 1)
2204 MIPS_SYS(sys_fdatasync , 0)
2205 MIPS_SYS(sys_sysctl , 1)
2206 MIPS_SYS(sys_mlock , 2)
2207 MIPS_SYS(sys_munlock , 2) /* 4155 */
2208 MIPS_SYS(sys_mlockall , 1)
2209 MIPS_SYS(sys_munlockall , 0)
2210 MIPS_SYS(sys_sched_setparam, 2)
2211 MIPS_SYS(sys_sched_getparam, 2)
2212 MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
2213 MIPS_SYS(sys_sched_getscheduler, 1)
2214 MIPS_SYS(sys_sched_yield , 0)
2215 MIPS_SYS(sys_sched_get_priority_max, 1)
2216 MIPS_SYS(sys_sched_get_priority_min, 1)
2217 MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
2218 MIPS_SYS(sys_nanosleep, 2)
2219 MIPS_SYS(sys_mremap , 5)
2220 MIPS_SYS(sys_accept , 3)
2221 MIPS_SYS(sys_bind , 3)
2222 MIPS_SYS(sys_connect , 3) /* 4170 */
2223 MIPS_SYS(sys_getpeername , 3)
2224 MIPS_SYS(sys_getsockname , 3)
2225 MIPS_SYS(sys_getsockopt , 5)
2226 MIPS_SYS(sys_listen , 2)
2227 MIPS_SYS(sys_recv , 4) /* 4175 */
2228 MIPS_SYS(sys_recvfrom , 6)
2229 MIPS_SYS(sys_recvmsg , 3)
2230 MIPS_SYS(sys_send , 4)
2231 MIPS_SYS(sys_sendmsg , 3)
2232 MIPS_SYS(sys_sendto , 6) /* 4180 */
2233 MIPS_SYS(sys_setsockopt , 5)
2234 MIPS_SYS(sys_shutdown , 2)
2235 MIPS_SYS(sys_socket , 3)
2236 MIPS_SYS(sys_socketpair , 4)
2237 MIPS_SYS(sys_setresuid , 3) /* 4185 */
2238 MIPS_SYS(sys_getresuid , 3)
2239 MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */
2240 MIPS_SYS(sys_poll , 3)
2241 MIPS_SYS(sys_nfsservctl , 3)
2242 MIPS_SYS(sys_setresgid , 3) /* 4190 */
2243 MIPS_SYS(sys_getresgid , 3)
2244 MIPS_SYS(sys_prctl , 5)
2245 MIPS_SYS(sys_rt_sigreturn, 0)
2246 MIPS_SYS(sys_rt_sigaction, 4)
2247 MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
2248 MIPS_SYS(sys_rt_sigpending, 2)
2249 MIPS_SYS(sys_rt_sigtimedwait, 4)
2250 MIPS_SYS(sys_rt_sigqueueinfo, 3)
2251 MIPS_SYS(sys_rt_sigsuspend, 0)
2252 MIPS_SYS(sys_pread64 , 6) /* 4200 */
2253 MIPS_SYS(sys_pwrite64 , 6)
2254 MIPS_SYS(sys_chown , 3)
2255 MIPS_SYS(sys_getcwd , 2)
2256 MIPS_SYS(sys_capget , 2)
2257 MIPS_SYS(sys_capset , 2) /* 4205 */
2258 MIPS_SYS(sys_sigaltstack , 2)
2259 MIPS_SYS(sys_sendfile , 4)
2260 MIPS_SYS(sys_ni_syscall , 0)
2261 MIPS_SYS(sys_ni_syscall , 0)
2262 MIPS_SYS(sys_mmap2 , 6) /* 4210 */
2263 MIPS_SYS(sys_truncate64 , 4)
2264 MIPS_SYS(sys_ftruncate64 , 4)
2265 MIPS_SYS(sys_stat64 , 2)
2266 MIPS_SYS(sys_lstat64 , 2)
2267 MIPS_SYS(sys_fstat64 , 2) /* 4215 */
2268 MIPS_SYS(sys_pivot_root , 2)
2269 MIPS_SYS(sys_mincore , 3)
2270 MIPS_SYS(sys_madvise , 3)
2271 MIPS_SYS(sys_getdents64 , 3)
2272 MIPS_SYS(sys_fcntl64 , 3) /* 4220 */
2273 MIPS_SYS(sys_ni_syscall , 0)
2274 MIPS_SYS(sys_gettid , 0)
2275 MIPS_SYS(sys_readahead , 5)
2276 MIPS_SYS(sys_setxattr , 5)
2277 MIPS_SYS(sys_lsetxattr , 5) /* 4225 */
2278 MIPS_SYS(sys_fsetxattr , 5)
2279 MIPS_SYS(sys_getxattr , 4)
2280 MIPS_SYS(sys_lgetxattr , 4)
2281 MIPS_SYS(sys_fgetxattr , 4)
2282 MIPS_SYS(sys_listxattr , 3) /* 4230 */
2283 MIPS_SYS(sys_llistxattr , 3)
2284 MIPS_SYS(sys_flistxattr , 3)
2285 MIPS_SYS(sys_removexattr , 2)
2286 MIPS_SYS(sys_lremovexattr, 2)
2287 MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
2288 MIPS_SYS(sys_tkill , 2)
2289 MIPS_SYS(sys_sendfile64 , 5)
2290 MIPS_SYS(sys_futex , 6)
2291 MIPS_SYS(sys_sched_setaffinity, 3)
2292 MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
2293 MIPS_SYS(sys_io_setup , 2)
2294 MIPS_SYS(sys_io_destroy , 1)
2295 MIPS_SYS(sys_io_getevents, 5)
2296 MIPS_SYS(sys_io_submit , 3)
2297 MIPS_SYS(sys_io_cancel , 3) /* 4245 */
2298 MIPS_SYS(sys_exit_group , 1)
2299 MIPS_SYS(sys_lookup_dcookie, 3)
2300 MIPS_SYS(sys_epoll_create, 1)
2301 MIPS_SYS(sys_epoll_ctl , 4)
2302 MIPS_SYS(sys_epoll_wait , 3) /* 4250 */
2303 MIPS_SYS(sys_remap_file_pages, 5)
2304 MIPS_SYS(sys_set_tid_address, 1)
2305 MIPS_SYS(sys_restart_syscall, 0)
2306 MIPS_SYS(sys_fadvise64_64, 7)
2307 MIPS_SYS(sys_statfs64 , 3) /* 4255 */
2308 MIPS_SYS(sys_fstatfs64 , 2)
2309 MIPS_SYS(sys_timer_create, 3)
2310 MIPS_SYS(sys_timer_settime, 4)
2311 MIPS_SYS(sys_timer_gettime, 2)
2312 MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
2313 MIPS_SYS(sys_timer_delete, 1)
2314 MIPS_SYS(sys_clock_settime, 2)
2315 MIPS_SYS(sys_clock_gettime, 2)
2316 MIPS_SYS(sys_clock_getres, 2)
2317 MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
2318 MIPS_SYS(sys_tgkill , 3)
2319 MIPS_SYS(sys_utimes , 2)
2320 MIPS_SYS(sys_mbind , 4)
2321 MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */
2322 MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */
2323 MIPS_SYS(sys_mq_open , 4)
2324 MIPS_SYS(sys_mq_unlink , 1)
2325 MIPS_SYS(sys_mq_timedsend, 5)
2326 MIPS_SYS(sys_mq_timedreceive, 5)
2327 MIPS_SYS(sys_mq_notify , 2) /* 4275 */
2328 MIPS_SYS(sys_mq_getsetattr, 3)
2329 MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */
2330 MIPS_SYS(sys_waitid , 4)
2331 MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */
2332 MIPS_SYS(sys_add_key , 5)
2333 MIPS_SYS(sys_request_key, 4)
2334 MIPS_SYS(sys_keyctl , 5)
2335 MIPS_SYS(sys_set_thread_area, 1)
2336 MIPS_SYS(sys_inotify_init, 0)
2337 MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
2338 MIPS_SYS(sys_inotify_rm_watch, 2)
2339 MIPS_SYS(sys_migrate_pages, 4)
2340 MIPS_SYS(sys_openat, 4)
2341 MIPS_SYS(sys_mkdirat, 3)
2342 MIPS_SYS(sys_mknodat, 4) /* 4290 */
2343 MIPS_SYS(sys_fchownat, 5)
2344 MIPS_SYS(sys_futimesat, 3)
2345 MIPS_SYS(sys_fstatat64, 4)
2346 MIPS_SYS(sys_unlinkat, 3)
2347 MIPS_SYS(sys_renameat, 4) /* 4295 */
2348 MIPS_SYS(sys_linkat, 5)
2349 MIPS_SYS(sys_symlinkat, 3)
2350 MIPS_SYS(sys_readlinkat, 4)
2351 MIPS_SYS(sys_fchmodat, 3)
2352 MIPS_SYS(sys_faccessat, 3) /* 4300 */
2353 MIPS_SYS(sys_pselect6, 6)
2354 MIPS_SYS(sys_ppoll, 5)
2355 MIPS_SYS(sys_unshare, 1)
2356 MIPS_SYS(sys_splice, 6)
2357 MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
2358 MIPS_SYS(sys_tee, 4)
2359 MIPS_SYS(sys_vmsplice, 4)
2360 MIPS_SYS(sys_move_pages, 6)
2361 MIPS_SYS(sys_set_robust_list, 2)
2362 MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
2363 MIPS_SYS(sys_kexec_load, 4)
2364 MIPS_SYS(sys_getcpu, 3)
2365 MIPS_SYS(sys_epoll_pwait, 6)
2366 MIPS_SYS(sys_ioprio_set, 3)
2367 MIPS_SYS(sys_ioprio_get, 2)
2368 MIPS_SYS(sys_utimensat, 4)
2369 MIPS_SYS(sys_signalfd, 3)
2370 MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
2371 MIPS_SYS(sys_eventfd, 1)
2372 MIPS_SYS(sys_fallocate, 6) /* 4320 */
2373 MIPS_SYS(sys_timerfd_create, 2)
2374 MIPS_SYS(sys_timerfd_gettime, 2)
2375 MIPS_SYS(sys_timerfd_settime, 4)
2376 MIPS_SYS(sys_signalfd4, 4)
2377 MIPS_SYS(sys_eventfd2, 2) /* 4325 */
2378 MIPS_SYS(sys_epoll_create1, 1)
2379 MIPS_SYS(sys_dup3, 3)
2380 MIPS_SYS(sys_pipe2, 2)
2381 MIPS_SYS(sys_inotify_init1, 1)
2382 MIPS_SYS(sys_preadv, 6) /* 4330 */
2383 MIPS_SYS(sys_pwritev, 6)
2384 MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
2385 MIPS_SYS(sys_perf_event_open, 5)
2386 MIPS_SYS(sys_accept4, 4)
2387 MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
2388 MIPS_SYS(sys_fanotify_init, 2)
2389 MIPS_SYS(sys_fanotify_mark, 6)
2390 MIPS_SYS(sys_prlimit64, 4)
2391 MIPS_SYS(sys_name_to_handle_at, 5)
2392 MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
2393 MIPS_SYS(sys_clock_adjtime, 2)
2394 MIPS_SYS(sys_syncfs, 1)
2395 };
2396 # undef MIPS_SYS
2397 # endif /* O32 */
2398
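/* Emulate the store-conditional half of a LL/SC pair: the store only
 * succeeds if the word (or doubleword) at lladdr still holds llval, in
 * which case llnewval is written and the destination register is set to
 * 1; otherwise the register is cleared.  Returns non-zero if a guest
 * memory access faulted, and the caller raises SIGSEGV in that case. */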
2399 static int do_store_exclusive(CPUMIPSState *env)
2400 {
2401 target_ulong addr;
2402 target_ulong page_addr;
2403 target_ulong val;
2404 int flags;
2405 int segv = 0;
2406 int reg;
2407 int d;
2408
2409 addr = env->lladdr;
2410 page_addr = addr & TARGET_PAGE_MASK;
2411 start_exclusive();
2412 mmap_lock();
2413 flags = page_get_flags(page_addr);
2414 if ((flags & PAGE_READ) == 0) {
2415 segv = 1;
2416 } else {
2417 reg = env->llreg & 0x1f;
2418 d = (env->llreg & 0x20) != 0;
2419 if (d) {
2420 segv = get_user_s64(val, addr);
2421 } else {
2422 segv = get_user_s32(val, addr);
2423 }
2424 if (!segv) {
2425 if (val != env->llval) {
2426 env->active_tc.gpr[reg] = 0;
2427 } else {
2428 if (d) {
2429 segv = put_user_u64(env->llnewval, addr);
2430 } else {
2431 segv = put_user_u32(env->llnewval, addr);
2432 }
2433 if (!segv) {
2434 env->active_tc.gpr[reg] = 1;
2435 }
2436 }
2437 }
2438 }
2439 env->lladdr = -1;
2440 if (!segv) {
2441 env->active_tc.PC += 4;
2442 }
2443 mmap_unlock();
2444 end_exclusive();
2445 return segv;
2446 }
2447
2448 /* Break codes */
2449 enum {
2450 BRK_OVERFLOW = 6,
2451 BRK_DIVZERO = 7
2452 };
2453
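/* Map a 'break' instruction code onto a signal: the overflow and
 * divide-by-zero codes become SIGFPE with an appropriate si_code,
 * everything else is reported as SIGTRAP. */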
2454 static int do_break(CPUMIPSState *env, target_siginfo_t *info,
2455 unsigned int code)
2456 {
2457 int ret = -1;
2458
2459 switch (code) {
2460 case BRK_OVERFLOW:
2461 case BRK_DIVZERO:
2462 info->si_signo = TARGET_SIGFPE;
2463 info->si_errno = 0;
2464 info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
2465         queue_signal(env, info->si_signo, info);
2466 ret = 0;
2467 break;
2468 default:
2469 info->si_signo = TARGET_SIGTRAP;
2470 info->si_errno = 0;
2471         queue_signal(env, info->si_signo, info);
2472 ret = 0;
2473 break;
2474 }
2475
2476 return ret;
2477 }
2478
2479 void cpu_loop(CPUMIPSState *env)
2480 {
2481 CPUState *cs = CPU(mips_env_get_cpu(env));
2482 target_siginfo_t info;
2483 int trapnr;
2484 abi_long ret;
2485 # ifdef TARGET_ABI_MIPSO32
2486 unsigned int syscall_num;
2487 # endif
2488
2489 for(;;) {
2490 cpu_exec_start(cs);
2491 trapnr = cpu_mips_exec(cs);
2492 cpu_exec_end(cs);
2493 switch(trapnr) {
2494 case EXCP_SYSCALL:
2495 env->active_tc.PC += 4;
2496 # ifdef TARGET_ABI_MIPSO32
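            /* o32 syscall numbers start at 4000; the number arrives in
               gpr[2] (v0) and the first four arguments in gpr[4]..gpr[7]
               (a0..a3), with any further arguments on the guest stack. */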
2497 syscall_num = env->active_tc.gpr[2] - 4000;
2498 if (syscall_num >= sizeof(mips_syscall_args)) {
2499 ret = -TARGET_ENOSYS;
2500 } else {
2501 int nb_args;
2502 abi_ulong sp_reg;
2503 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
2504
2505 nb_args = mips_syscall_args[syscall_num];
2506 sp_reg = env->active_tc.gpr[29];
2507 switch (nb_args) {
2508                 /* these arguments are taken from the stack; each case
2509                    deliberately falls through to fetch the remaining ones */
2509 case 8:
2510 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
2511 goto done_syscall;
2512 }
2513 case 7:
2514 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
2515 goto done_syscall;
2516 }
2517 case 6:
2518 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
2519 goto done_syscall;
2520 }
2521 case 5:
2522 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
2523 goto done_syscall;
2524 }
2525 default:
2526 break;
2527 }
2528 ret = do_syscall(env, env->active_tc.gpr[2],
2529 env->active_tc.gpr[4],
2530 env->active_tc.gpr[5],
2531 env->active_tc.gpr[6],
2532 env->active_tc.gpr[7],
2533 arg5, arg6, arg7, arg8);
2534 }
2535 done_syscall:
2536 # else
2537 ret = do_syscall(env, env->active_tc.gpr[2],
2538 env->active_tc.gpr[4], env->active_tc.gpr[5],
2539 env->active_tc.gpr[6], env->active_tc.gpr[7],
2540 env->active_tc.gpr[8], env->active_tc.gpr[9],
2541 env->active_tc.gpr[10], env->active_tc.gpr[11]);
2542 # endif /* O32 */
2543 if (ret == -TARGET_ERESTARTSYS) {
2544 env->active_tc.PC -= 4;
2545 break;
2546 }
2547 if (ret == -TARGET_QEMU_ESIGRETURN) {
2548 /* Returning from a successful sigreturn syscall.
2549 Avoid clobbering register state. */
2550 break;
2551 }
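            /* o32 reports failure out of band: gpr[7] (a3) is set to 1 and
               v0 carries the positive errno.  1133 apparently corresponds to
               EDQUOT, the largest errno value defined for MIPS, so anything
               in the range -1..-1133 is taken to be an error. */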
2552 if ((abi_ulong)ret >= (abi_ulong)-1133) {
2553 env->active_tc.gpr[7] = 1; /* error flag */
2554 ret = -ret;
2555 } else {
2556 env->active_tc.gpr[7] = 0; /* error flag */
2557 }
2558 env->active_tc.gpr[2] = ret;
2559 break;
2560 case EXCP_TLBL:
2561 case EXCP_TLBS:
2562 case EXCP_AdEL:
2563 case EXCP_AdES:
2564 info.si_signo = TARGET_SIGSEGV;
2565 info.si_errno = 0;
2566 /* XXX: check env->error_code */
2567 info.si_code = TARGET_SEGV_MAPERR;
2568 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2569 queue_signal(env, info.si_signo, &info);
2570 break;
2571 case EXCP_CpU:
2572 case EXCP_RI:
2573 info.si_signo = TARGET_SIGILL;
2574 info.si_errno = 0;
2575 info.si_code = 0;
2576 queue_signal(env, info.si_signo, &info);
2577 break;
2578 case EXCP_INTERRUPT:
2579 /* just indicate that signals should be handled asap */
2580 break;
2581 case EXCP_DEBUG:
2582 {
2583 int sig;
2584
2585 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2586 if (sig)
2587 {
2588 info.si_signo = sig;
2589 info.si_errno = 0;
2590 info.si_code = TARGET_TRAP_BRKPT;
2591 queue_signal(env, info.si_signo, &info);
2592 }
2593 }
2594 break;
2595 case EXCP_SC:
2596 if (do_store_exclusive(env)) {
2597 info.si_signo = TARGET_SIGSEGV;
2598 info.si_errno = 0;
2599 info.si_code = TARGET_SEGV_MAPERR;
2600 info._sifields._sigfault._addr = env->active_tc.PC;
2601 queue_signal(env, info.si_signo, &info);
2602 }
2603 break;
2604 case EXCP_DSPDIS:
2605 info.si_signo = TARGET_SIGILL;
2606 info.si_errno = 0;
2607 info.si_code = TARGET_ILL_ILLOPC;
2608 queue_signal(env, info.si_signo, &info);
2609 break;
2610 /* The code below was inspired by the MIPS Linux kernel trap
2611 * handling code in arch/mips/kernel/traps.c.
2612 */
2613 case EXCP_BREAK:
2614 {
2615 abi_ulong trap_instr;
2616 unsigned int code;
2617
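                /* Re-read the break instruction that trapped so its code
                   field can be extracted; the layout differs between
                   microMIPS/MIPS16e (16- or 32-bit encodings) and the
                   standard 32-bit encoding handled below. */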
2618 if (env->hflags & MIPS_HFLAG_M16) {
2619 if (env->insn_flags & ASE_MICROMIPS) {
2620 /* microMIPS mode */
2621 ret = get_user_u16(trap_instr, env->active_tc.PC);
2622 if (ret != 0) {
2623 goto error;
2624 }
2625
2626 if ((trap_instr >> 10) == 0x11) {
2627 /* 16-bit instruction */
2628 code = trap_instr & 0xf;
2629 } else {
2630 /* 32-bit instruction */
2631 abi_ulong instr_lo;
2632
2633 ret = get_user_u16(instr_lo,
2634 env->active_tc.PC + 2);
2635 if (ret != 0) {
2636 goto error;
2637 }
2638 trap_instr = (trap_instr << 16) | instr_lo;
2639 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2640 /* Unfortunately, microMIPS also suffers from
2641 the old assembler bug... */
2642 if (code >= (1 << 10)) {
2643 code >>= 10;
2644 }
2645 }
2646 } else {
2647 /* MIPS16e mode */
2648 ret = get_user_u16(trap_instr, env->active_tc.PC);
2649 if (ret != 0) {
2650 goto error;
2651 }
2652 code = (trap_instr >> 6) & 0x3f;
2653 }
2654 } else {
2655 ret = get_user_u32(trap_instr, env->active_tc.PC);
2656 if (ret != 0) {
2657 goto error;
2658 }
2659
2660 /* As described in the original Linux kernel code, the
2661 * below checks on 'code' are to work around an old
2662 * assembly bug.
2663 */
2664 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2665 if (code >= (1 << 10)) {
2666 code >>= 10;
2667 }
2668 }
2669
2670 if (do_break(env, &info, code) != 0) {
2671 goto error;
2672 }
2673 }
2674 break;
2675 case EXCP_TRAP:
2676 {
2677 abi_ulong trap_instr;
2678 unsigned int code = 0;
2679
2680 if (env->hflags & MIPS_HFLAG_M16) {
2681 /* microMIPS mode */
2682 abi_ulong instr[2];
2683
2684 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2685 get_user_u16(instr[1], env->active_tc.PC + 2);
2686
2687 trap_instr = (instr[0] << 16) | instr[1];
2688 } else {
2689 ret = get_user_u32(trap_instr, env->active_tc.PC);
2690 }
2691
2692 if (ret != 0) {
2693 goto error;
2694 }
2695
2696 /* The immediate versions don't provide a code. */
2697 if (!(trap_instr & 0xFC000000)) {
2698 if (env->hflags & MIPS_HFLAG_M16) {
2699 /* microMIPS mode */
2700 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2701 } else {
2702 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2703 }
2704 }
2705
2706 if (do_break(env, &info, code) != 0) {
2707 goto error;
2708 }
2709 }
2710 break;
2711 default:
2712 error:
2713 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
2714 abort();
2715 }
2716 process_pending_signals(env);
2717 }
2718 }
2719 #endif
2720
2721 #ifdef TARGET_OPENRISC
2722
2723 void cpu_loop(CPUOpenRISCState *env)
2724 {
2725 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2726 int trapnr, gdbsig;
2727 abi_long ret;
2728
2729 for (;;) {
2730 cpu_exec_start(cs);
2731 trapnr = cpu_openrisc_exec(cs);
2732 cpu_exec_end(cs);
2733 gdbsig = 0;
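        /* gdbsig is left at 0 for traps handled inline; for anything we
           cannot handle it names the signal to report to an attached gdb
           (and, unless it is SIGTRAP, the process then exits). */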
2734
2735 switch (trapnr) {
2736 case EXCP_RESET:
2737 qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
2738 exit(EXIT_FAILURE);
2739 break;
2740 case EXCP_BUSERR:
2741 qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
2742 gdbsig = TARGET_SIGBUS;
2743 break;
2744 case EXCP_DPF:
2745 case EXCP_IPF:
2746 cpu_dump_state(cs, stderr, fprintf, 0);
2747 gdbsig = TARGET_SIGSEGV;
2748 break;
2749 case EXCP_TICK:
2750 qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
2751 break;
2752 case EXCP_ALIGN:
2753 qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
2754 gdbsig = TARGET_SIGBUS;
2755 break;
2756 case EXCP_ILLEGAL:
2757 qemu_log_mask(CPU_LOG_INT, "\nIllegal instructionpc is %#x\n", env->pc);
2758 gdbsig = TARGET_SIGILL;
2759 break;
2760 case EXCP_INT:
2761 qemu_log_mask(CPU_LOG_INT, "\nExternal interruptpc is %#x\n", env->pc);
2762 break;
2763 case EXCP_DTLBMISS:
2764 case EXCP_ITLBMISS:
2765 qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
2766 break;
2767 case EXCP_RANGE:
2768 qemu_log_mask(CPU_LOG_INT, "\nRange\n");
2769 gdbsig = TARGET_SIGSEGV;
2770 break;
2771 case EXCP_SYSCALL:
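            /* System call trap: the OpenRISC Linux ABI puts the syscall
               number in r11 and the arguments in r3..r8; the result goes
               back in r11.  Step past the trapping instruction first. */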
2772 env->pc += 4; /* 0xc00; */
2773 ret = do_syscall(env,
2774 env->gpr[11], /* return value */
2775                              env->gpr[3], /* r3 - r8 are params */
2776 env->gpr[4],
2777 env->gpr[5],
2778 env->gpr[6],
2779 env->gpr[7],
2780 env->gpr[8], 0, 0);
2781 if (ret == -TARGET_ERESTARTSYS) {
2782 env->pc -= 4;
2783 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2784 env->gpr[11] = ret;
2785 }
2786 break;
2787 case EXCP_FPE:
2788 qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
2789 break;
2790 case EXCP_TRAP:
2791 qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
2792 gdbsig = TARGET_SIGTRAP;
2793 break;
2794 case EXCP_NR:
2795 qemu_log_mask(CPU_LOG_INT, "\nNR\n");
2796 break;
2797 default:
2798 EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
2799 trapnr);
2800 gdbsig = TARGET_SIGILL;
2801 break;
2802 }
2803 if (gdbsig) {
2804 gdb_handlesig(cs, gdbsig);
2805 if (gdbsig != TARGET_SIGTRAP) {
2806 exit(EXIT_FAILURE);
2807 }
2808 }
2809
2810 process_pending_signals(env);
2811 }
2812 }
2813
2814 #endif /* TARGET_OPENRISC */
2815
2816 #ifdef TARGET_SH4
2817 void cpu_loop(CPUSH4State *env)
2818 {
2819 CPUState *cs = CPU(sh_env_get_cpu(env));
2820 int trapnr, ret;
2821 target_siginfo_t info;
2822
2823 while (1) {
2824 cpu_exec_start(cs);
2825 trapnr = cpu_sh4_exec(cs);
2826 cpu_exec_end(cs);
2827
2828 switch (trapnr) {
2829 case 0x160:
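            /* Syscall trap: following the usual SH-4 Linux convention, the
               syscall number comes from gregs[3] (r3), the arguments from
               gregs[4..7] and gregs[0..1], and the result is written back
               to gregs[0]. */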
2830 env->pc += 2;
2831 ret = do_syscall(env,
2832 env->gregs[3],
2833 env->gregs[4],
2834 env->gregs[5],
2835 env->gregs[6],
2836 env->gregs[7],
2837 env->gregs[0],
2838 env->gregs[1],
2839 0, 0);
2840 if (ret == -TARGET_ERESTARTSYS) {
2841 env->pc -= 2;
2842 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2843 env->gregs[0] = ret;
2844 }
2845 break;
2846 case EXCP_INTERRUPT:
2847 /* just indicate that signals should be handled asap */
2848 break;
2849 case EXCP_DEBUG:
2850 {
2851 int sig;
2852
2853 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2854 if (sig)
2855 {
2856 info.si_signo = sig;
2857 info.si_errno = 0;
2858 info.si_code = TARGET_TRAP_BRKPT;
2859 queue_signal(env, info.si_signo, &info);
2860 }
2861 }
2862 break;
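        /* 0xa0/0xc0 are memory access fault codes; deliver them as SIGSEGV
           with the faulting address taken from the TEA register. */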
2863 case 0xa0:
2864 case 0xc0:
2865 info.si_signo = TARGET_SIGSEGV;
2866 info.si_errno = 0;
2867 info.si_code = TARGET_SEGV_MAPERR;
2868 info._sifields._sigfault._addr = env->tea;
2869 queue_signal(env, info.si_signo, &info);
2870 break;
2871
2872 default:
2873             fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
2874 cpu_dump_state(cs, stderr, fprintf, 0);
2875 exit(EXIT_FAILURE);
2876 }
2877 process_pending_signals (env);
2878 }
2879 }
2880 #endif
2881
2882 #ifdef TARGET_CRIS
2883 void cpu_loop(CPUCRISState *env)
2884 {
2885 CPUState *cs = CPU(cris_env_get_cpu(env));
2886 int trapnr, ret;
2887 target_siginfo_t info;
2888
2889 while (1) {
2890 cpu_exec_start(cs);
2891 trapnr = cpu_cris_exec(cs);
2892 cpu_exec_end(cs);
2893 switch (trapnr) {
2894 case 0xaa:
2895 {
2896 info.si_signo = TARGET_SIGSEGV;
2897 info.si_errno = 0;
2898 /* XXX: check env->error_code */
2899 info.si_code = TARGET_SEGV_MAPERR;
2900 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2901 queue_signal(env, info.si_signo, &info);
2902 }
2903 break;
2904 case EXCP_INTERRUPT:
2905 /* just indicate that signals should be handled asap */
2906 break;
2907 case EXCP_BREAK:
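            /* The break instruction is the CRIS syscall gate: the number is
               in r9, the arguments in r10-r13 plus two support registers
               (pregs[7] and pregs[11]), and the result comes back in r10. */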
2908 ret = do_syscall(env,
2909 env->regs[9],
2910 env->regs[10],
2911 env->regs[11],
2912 env->regs[12],
2913 env->regs[13],
2914 env->pregs[7],
2915 env->pregs[11],
2916 0, 0);
2917 if (ret == -TARGET_ERESTARTSYS) {
2918 env->pc -= 2;
2919 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2920 env->regs[10] = ret;
2921 }
2922 break;
2923 case EXCP_DEBUG:
2924 {
2925 int sig;
2926
2927 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2928 if (sig)
2929 {
2930 info.si_signo = sig;
2931 info.si_errno = 0;
2932 info.si_code = TARGET_TRAP_BRKPT;
2933 queue_signal(env, info.si_signo, &info);
2934 }
2935 }
2936 break;
2937 default:
2938             fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
2939 cpu_dump_state(cs, stderr, fprintf, 0);
2940 exit(EXIT_FAILURE);
2941 }
2942 process_pending_signals (env);
2943 }
2944 }
2945 #endif
2946
2947 #ifdef TARGET_MICROBLAZE
2948 void cpu_loop(CPUMBState *env)
2949 {
2950 CPUState *cs = CPU(mb_env_get_cpu(env));
2951 int trapnr, ret;
2952 target_siginfo_t info;
2953
2954 while (1) {
2955 cpu_exec_start(cs);
2956 trapnr = cpu_mb_exec(cs);
2957 cpu_exec_end(cs);
2958 switch (trapnr) {
2959 case 0xaa:
2960 {
2961 info.si_signo = TARGET_SIGSEGV;
2962 info.si_errno = 0;
2963 /* XXX: check env->error_code */
2964 info.si_code = TARGET_SEGV_MAPERR;
2965 info._sifields._sigfault._addr = 0;
2966 queue_signal(env, info.si_signo, &info);
2967 }
2968 break;
2969 case EXCP_INTERRUPT:
2970 /* just indicate that signals should be handled asap */
2971 break;
2972 case EXCP_BREAK:
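            /* Syscall trap: r12 holds the syscall number, r5..r10 the
               arguments and r3 receives the result; r14 carries the return
               address, adjusted just below. */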
2973 /* Return address is 4 bytes after the call. */
2974 env->regs[14] += 4;
2975 env->sregs[SR_PC] = env->regs[14];
2976 ret = do_syscall(env,
2977 env->regs[12],
2978 env->regs[5],
2979 env->regs[6],
2980 env->regs[7],
2981 env->regs[8],
2982 env->regs[9],
2983 env->regs[10],
2984 0, 0);
2985 if (ret == -TARGET_ERESTARTSYS) {
2986 /* Wind back to before the syscall. */
2987 env->sregs[SR_PC] -= 4;
2988 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2989 env->regs[3] = ret;
2990 }
2991 /* All syscall exits result in guest r14 being equal to the
2992 * PC we return to, because the kernel syscall exit "rtbd" does
2993 * this. (This is true even for sigreturn(); note that r14 is
2994 * not a userspace-usable register, as the kernel may clobber it
2995 * at any point.)
2996 */
2997 env->regs[14] = env->sregs[SR_PC];
2998 break;
2999 case EXCP_HW_EXCP:
3000 env->regs[17] = env->sregs[SR_PC] + 4;
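            /* If the exception hit a branch delay slot (D_FLAG), flag that
               in ESR (bit 12) and back the PC up to the branch instruction;
               see the FIXME below about immediate-prefixed branches. */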
3001 if (env->iflags & D_FLAG) {
3002 env->sregs[SR_ESR] |= 1 << 12;
3003 env->sregs[SR_PC] -= 4;
3004 /* FIXME: if branch was immed, replay the imm as well. */
3005 }
3006
3007 env->iflags &= ~(IMM_FLAG | D_FLAG);
3008
3009 switch (env->sregs[SR_ESR] & 31) {
3010 case ESR_EC_DIVZERO:
3011 info.si_signo = TARGET_SIGFPE;
3012 info.si_errno = 0;
3013 info.si_code = TARGET_FPE_FLTDIV;
3014 info._sifields._sigfault._addr = 0;
3015 queue_signal(env, info.si_signo, &info);
3016 break;
3017 case ESR_EC_FPU:
3018 info.si_signo = TARGET_SIGFPE;
3019 info.si_errno = 0;
3020 if (env->sregs[SR_FSR] & FSR_IO) {
3021 info.si_code = TARGET_FPE_FLTINV;
3022 }
3023 if (env->sregs[SR_FSR] & FSR_DZ) {
3024 info.si_code = TARGET_FPE_FLTDIV;
3025 }
3026 info._sifields._sigfault._addr = 0;
3027 queue_signal(env, info.si_signo, &info);
3028 break;
3029 default:
3030                 fprintf(stderr, "Unhandled hw-exception: 0x%x\n",
3031 env->sregs[SR_ESR] & ESR_EC_MASK);
3032 cpu_dump_state(cs, stderr, fprintf, 0);
3033 exit(EXIT_FAILURE);
3034 break;
3035 }
3036 break;
3037 case EXCP_DEBUG:
3038 {
3039 int sig;
3040
3041 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3042 if (sig)
3043 {
3044 info.si_signo = sig;
3045 info.si_errno = 0;
3046 info.si_code = TARGET_TRAP_BRKPT;
3047 queue_signal(env, info.si_signo, &info);
3048 }
3049 }
3050 break;
3051 default:
3052             fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3053 cpu_dump_state(cs, stderr, fprintf, 0);
3054 exit(EXIT_FAILURE);
3055 }
3056 process_pending_signals (env);
3057 }
3058 }
3059 #endif
3060
3061 #ifdef TARGET_M68K
3062
3063 void cpu_loop(CPUM68KState *env)
3064 {
3065 CPUState *cs = CPU(m68k_env_get_cpu(env));
3066 int trapnr;
3067 unsigned int n;
3068 target_siginfo_t info;
3069 TaskState *ts = cs->opaque;
3070
3071 for(;;) {
3072 cpu_exec_start(cs);
3073 trapnr = cpu_m68k_exec(cs);
3074 cpu_exec_end(cs);
3075 switch(trapnr) {
3076 case EXCP_ILLEGAL:
3077 {
3078 if (ts->sim_syscalls) {
3079 uint16_t nr;
3080 get_user_u16(nr, env->pc + 2);
3081 env->pc += 4;
3082 do_m68k_simcall(env, nr);
3083 } else {
3084 goto do_sigill;
3085 }
3086 }
3087 break;
3088 case EXCP_HALT_INSN:
3089             /* Semihosting syscall. */
3090 env->pc += 4;
3091 do_m68k_semihosting(env, env->dregs[0]);
3092 break;
3093 case EXCP_LINEA:
3094 case EXCP_LINEF:
3095 case EXCP_UNSUPPORTED:
3096 do_sigill:
3097 info.si_signo = TARGET_SIGILL;
3098 info.si_errno = 0;
3099 info.si_code = TARGET_ILL_ILLOPN;
3100 info._sifields._sigfault._addr = env->pc;
3101 queue_signal(env, info.si_signo, &info);
3102 break;
3103 case EXCP_TRAP0:
3104 {
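                /* trap #0 is the m68k Linux syscall gate: d0 holds the
                   syscall number, d1-d5 and a0 the arguments, and d0 the
                   return value. */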
3105 abi_long ret;
3106 ts->sim_syscalls = 0;
3107 n = env->dregs[0];
3108 env->pc += 2;
3109 ret = do_syscall(env,
3110 n,
3111 env->dregs[1],
3112 env->dregs[2],
3113 env->dregs[3],
3114 env->dregs[4],
3115 env->dregs[5],
3116 env->aregs[0],
3117 0, 0);
3118 if (ret == -TARGET_ERESTARTSYS) {
3119 env->pc -= 2;
3120 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
3121 env->dregs[0] = ret;
3122 }
3123 }
3124 break;
3125 case EXCP_INTERRUPT:
3126 /* just indicate that signals should be handled asap */
3127 break;
3128 case EXCP_ACCESS:
3129 {
3130 info.si_signo = TARGET_SIGSEGV;
3131 info.si_errno = 0;
3132 /* XXX: check env->error_code */
3133 info.si_code = TARGET_SEGV_MAPERR;
3134 info._sifields._sigfault._addr = env->mmu.ar;
3135 queue_signal(env, info.si_signo, &info);
3136 }
3137 break;
3138 case EXCP_DEBUG:
3139 {
3140 int sig;
3141
3142 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3143 if (sig)
3144 {
3145 info.si_signo = sig;
3146 info.si_errno = 0;
3147 info.si_code = TARGET_TRAP_BRKPT;
3148 queue_signal(env, info.si_signo, &info);
3149 }
3150 }
3151 break;
3152 default:
3153 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
3154 abort();
3155 }
3156 process_pending_signals(env);
3157 }
3158 }
3159 #endif /* TARGET_M68K */
3160
3161 #ifdef TARGET_ALPHA
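/* Emulate the store-conditional half of Alpha's LDx_L/STx_C sequence:
 * the store succeeds only if the locked address is unchanged and the
 * memory still holds lock_value; the result register gets 1 on success
 * and 0 on failure, and a faulting access raises SIGSEGV instead. */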
3162 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3163 {
3164 target_ulong addr, val, tmp;
3165 target_siginfo_t info;
3166 int ret = 0;
3167
3168 addr = env->lock_addr;
3169 tmp = env->lock_st_addr;
3170 env->lock_addr = -1;
3171 env->lock_st_addr = 0;
3172
3173 start_exclusive();
3174 mmap_lock();
3175
3176 if (addr == tmp) {
3177 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3178 goto do_sigsegv;
3179 }
3180
3181 if (val == env->lock_value) {
3182 tmp = env->ir[reg];
3183 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3184 goto do_sigsegv;
3185 }
3186 ret = 1;
3187 }
3188 }
3189 env->ir[reg] = ret;
3190 env->pc += 4;
3191
3192 mmap_unlock();
3193 end_exclusive();
3194 return;
3195
3196 do_sigsegv:
3197 mmap_unlock();
3198 end_exclusive();
3199
3200 info.si_signo = TARGET_SIGSEGV;
3201 info.si_errno = 0;
3202 info.si_code = TARGET_SEGV_MAPERR;
3203 info._sifields._sigfault._addr = addr;
3204 queue_signal(env, TARGET_SIGSEGV, &info);
3205 }
3206
3207 void cpu_loop(CPUAlphaState *env)
3208 {
3209 CPUState *cs = CPU(alpha_env_get_cpu(env));
3210 int trapnr;
3211 target_siginfo_t info;
3212 abi_long sysret;
3213
3214 while (1) {
3215 cpu_exec_start(cs);
3216 trapnr = cpu_alpha_exec(cs);
3217 cpu_exec_end(cs);
3218
3219