/*
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu/path.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "exec/log.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
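
/*
 * Illustrative sketch, not part of the original file: one way to claim a
 * contiguous 'reserved_va'-sized chunk of host virtual memory up front is an
 * anonymous PROT_NONE mapping that guest pages are later placed inside.  The
 * helper name and the MAP_NORESERVE choice are assumptions made for this
 * example only; QEMU's real reservation logic lives elsewhere.
 */
#include <sys/mman.h>

static void * __attribute__((unused))
reserve_guest_space_example(unsigned long size)
{
    /* Reserve address space without committing memory: every access faults
       until a sub-range is remapped with real protections later on. */
    void *p = mmap(NULL, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    return p == MAP_FAILED ? NULL : p;
}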
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.   */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
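
/*
 * Illustrative sketch, not from the original source: the primitives above
 * are meant to bracket a guest read-modify-write so that no other vCPU is
 * executing guest code in between.  The helper name below is hypothetical;
 * the real users are the per-target store-exclusive handlers later in this
 * file.
 */
static int __attribute__((unused))
cmpxchg_u32_example(abi_ulong addr, uint32_t expect, uint32_t newval)
{
    uint32_t val;
    int stored = 0;

    start_exclusive();                  /* stop every other vCPU          */
    if (!get_user_u32(val, addr) && val == expect) {
        put_user_u32(newval, addr);     /* no guest code can race with us */
        stored = 1;
    }
    end_exclusive();                    /* resume the stopped vCPUs       */
    return stored;
}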
#ifdef TARGET_I386
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int trapnr;
    abi_ulong pc;
    abi_ulong ret;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_x86_exec(cs);
        cpu_exec_end(cs);
        switch (trapnr) {
        case 0x80:
            /* linux syscall from int $0x80 */
            ret = do_syscall(env,
                             env->regs[R_EAX],
                             env->regs[R_EBX],
                             env->regs[R_ECX],
                             env->regs[R_EDX],
                             env->regs[R_ESI],
                             env->regs[R_EDI],
                             env->regs[R_EBP],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->eip -= 2;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }
            break;
#ifndef TARGET_ABI32
        case EXCP_SYSCALL:
            /* linux syscall from syscall instruction */
            ret = do_syscall(env,
                             env->regs[R_EAX],
                             env->regs[R_EDI],
                             env->regs[R_ESI],
                             env->regs[R_EDX],
                             env->regs[10],
                             env->regs[8],
                             env->regs[9],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->eip -= 2;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }
            break;
#endif
        case EXCP0B_NOSEG:
        case EXCP0C_STACK:
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP0D_GPF:
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            } else
#endif
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP0E_PAGE:
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP00_DIVZ:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                /* division by zero */
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_INTDIV;
                info._sifields._sigfault._addr = env->eip;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP01_DB:
        case EXCP03_INT3:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                info.si_signo = TARGET_SIGTRAP;
                info.si_errno = 0;
                if (trapnr == EXCP01_DB) {
                    info.si_code = TARGET_TRAP_BRKPT;
                    info._sifields._sigfault._addr = env->eip;
                } else {
                    info.si_code = TARGET_SI_KERNEL;
                    info._sifields._sigfault._addr = 0;
                }
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP04_INTO:
        case EXCP05_BOUND:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP06_ILLOP:
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        default:
            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
                      (long)pc, trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
#endif /* TARGET_I386 */
#ifdef TARGET_ARM

#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })
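
/*
 * Illustrative sketch, not from the original source: like the plain
 * get_user_* helpers, the macros above evaluate to 0 on success and swap
 * the value when the guest's code/data endianness differs from the host
 * access.  A caller typically looks like this; the function name is made
 * up for the example.
 */
static int __attribute__((unused))
read_guest_insn_example(CPUARMState *env, abi_ulong pc, uint32_t *insn)
{
    uint32_t val;

    if (get_user_code_u32(val, pc, env)) {
        return -1;              /* guest address was not readable */
    }
    *insn = val;                /* already byte-swapped if required */
    return 0;
}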
/* Commpage handling -- there is no commpage for AArch64 */

#ifdef TARGET_ABI32
/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
}
/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
         /* XXX: This only works between threads, not between processes.
            It's probably possible to implement this with native host
            operations. However things like ldrex/strex are much harder so
            there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}
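
/*
 * Illustrative sketch, not from the original source: the fixed addresses
 * handled above are the kernel "user helper" ABI, which 32-bit ARM guest
 * code invokes by calling into the commpage.  Following the convention in
 * Documentation/arm/kernel_user_helpers.txt, a guest-side caller of
 * __kernel_cmpxchg looks roughly like this (guest code, not QEMU code,
 * hence kept out of the build).
 */
#if 0
typedef int (kernel_cmpxchg_fn)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(kernel_cmpxchg_fn *)0xffff0fc0)

static int atomic_add_example(volatile int *counter, int amount)
{
    int old, new;

    do {
        old = *counter;
        new = old + amount;
        /* returns 0 only if *counter still held 'old' and was updated */
    } while (__kernel_cmpxchg(old, new, counter) != 0);

    return new;
}
#endif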
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    uint64_t val;
    int size;
    int rc = 1;
    int segv = 0;
    uint32_t addr;

    start_exclusive();
    if (env->exclusive_addr != env->exclusive_test) {
        goto fail;
    }
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_data_u16(val, addr, env);
        break;
    case 2:
    case 3:
        segv = get_user_data_u32(val, addr, env);
        break;
    default:
        abort();
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        uint32_t valhi;
        segv = get_user_data_u32(valhi, addr + 4, env);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
        if (arm_cpu_bswap_data(env)) {
            val = deposit64((uint64_t)valhi, 32, 32, val);
        } else {
            val = deposit64(val, 32, 32, valhi);
        }
    }
    if (val != env->exclusive_val) {
        goto fail;
    }

    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_data_u16(val, addr, env);
        break;
    case 2:
    case 3:
        segv = put_user_data_u32(val, addr, env);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_data_u32(val, addr + 4, env);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
    }
    rc = 0;
fail:
    env->regs[15] += 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
done:
    end_exclusive();
    return segv;
}
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;
    uint32_t addr;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_arm_exec(cs);
        cpu_exec_end(cs);
        switch (trapnr) {
        case EXCP_UDEF:
            {
                TaskState *ts = cs->opaque;
                uint32_t opcode;
                int rc;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_errno = 0;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);
                } else if (rc < 0) { /* FP exception */
                    int arm_fpe = 0;

                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                        arm_fpe |= BIT_IOC;
                    if (-rc & float_flag_divbyzero)
                        arm_fpe |= BIT_DZC;
                    if (-rc & float_flag_overflow)
                        arm_fpe |= BIT_OFC;
                    if (-rc & float_flag_underflow)
                        arm_fpe |= BIT_UFC;
                    if (-rc & float_flag_inexact)
                        arm_fpe |= BIT_IXC;

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;
                        info.si_errno = 0;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, &info);
                    } else {
                        env->regs[15] += 4;
                    }

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                        fpsr |= BIT_IXC;
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                        fpsr |= BIT_UFC;
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                        fpsr |= BIT_OFC;
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                        fpsr |= BIT_DZC;
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                        fpsr |= BIT_IOC;
                    ts->fpa.fpsr = fpsr;
                } else { /* everything OK */
                    /* increment PC */
                    env->regs[15] += 4;
                }
            }
            break;
        case EXCP_SWI:
        case EXCP_BKPT:
            {
                env->eabi = 1;
                /* system call */
                if (trapnr == EXCP_BKPT) {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env);
                        n = insn & 0xff;
                        env->regs[15] += 2;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        env->regs[15] += 4;
                    }
                } else {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2, env);
                        n = insn & 0xff;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4, env);
                        n = insn & 0xffffff;
                    }
                }

                if (n == ARM_NR_cacheflush) {
                    /* nop */
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting (env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                    /* linux syscall */
                    if (env->thumb || n == 0) {
                        n = env->regs[7];
                    } else {
                        n -= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                    if ( n > ARM_NR_BASE) {
                        switch (n) {
                        case ARM_NR_cacheflush:
                            /* nop */
                            break;
                        case ARM_NR_set_tls:
                            cpu_set_tls(env, env->regs[0]);
                            env->regs[0] = 0;
                            break;
                        case ARM_NR_breakpoint:
                            env->regs[15] -= env->thumb ? 2 : 4;
                            goto excp_debug;
                        default:
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                                     n);
                            env->regs[0] = -TARGET_ENOSYS;
                            break;
                        }
                    } else {
                        ret = do_syscall(env,
                                         n,
                                         env->regs[0],
                                         env->regs[1],
                                         env->regs[2],
                                         env->regs[3],
                                         env->regs[4],
                                         env->regs[5],
                                         0, 0);
                        if (ret == -TARGET_ERESTARTSYS) {
                            env->regs[15] -= env->thumb ? 2 : 4;
                        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                            env->regs[0] = ret;
                        }
                    }
                } else {
                    goto error;
                }
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_STREX:
            if (!do_strex(env)) {
                break;
            }
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = addr;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_DEBUG:
        excp_debug:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
                goto error;
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
#else

/*
 * Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 *
 */
static int do_strex_a64(CPUARMState *env)
{
    uint64_t val;
    int size;
    int is_pair;
    int rc = 1;
    int segv = 0;
    uint64_t addr;
    int rs, rt, rt2;

    start_exclusive();
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {
        goto finish;
    }

    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_u16(val, addr);
        break;
    case 2:
        segv = get_user_u32(val, addr);
        break;
    case 3:
        segv = get_user_u64(val, addr);
        break;
    default:
        abort();
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto error;
    }
    if (val != env->exclusive_val) {
        goto finish;
    }
    if (is_pair) {
        if (size == 2) {
            segv = get_user_u32(val, addr + 4);
        } else {
            segv = get_user_u64(val, addr + 8);
        }
        if (segv) {
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
            goto error;
        }
        if (val != env->exclusive_high) {
            goto finish;
        }
    }
    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_u16(val, addr);
        break;
    case 2:
        segv = put_user_u32(val, addr);
        break;
    case 3:
        segv = put_user_u64(val, addr);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto error;
    }
    if (is_pair) {
        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];
        if (size == 2) {
            segv = put_user_u32(val, addr + 4);
        } else {
            segv = put_user_u64(val, addr + 8);
        }
        if (segv) {
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
            goto error;
        }
    }
    rc = 0;
finish:
    env->pc += 4;
    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    if (rs < 31) {
        env->xregs[rs] = rc;
    }
error:
    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
    end_exclusive();
    return segv;
}
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int trapnr, sig;
    abi_long ret;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_arm_exec(cs);
        cpu_exec_end(cs);

        switch (trapnr) {
        case EXCP_SWI:
            ret = do_syscall(env,
                             env->xregs[8],
                             env->xregs[0],
                             env->xregs[1],
                             env->xregs[2],
                             env->xregs[3],
                             env->xregs[4],
                             env->xregs[5],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->pc -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->xregs[0] = ret;
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_UDEF:
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_STREX:
            if (!do_strex_a64(env)) {
                break;
            }
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (sig) {
                info.si_signo = sig;
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_SEMIHOST:
            env->xregs[0] = do_arm_semihosting(env);
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        default:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;
    }
}
#endif /* ndef TARGET_ABI32 */

#endif /* TARGET_ARM */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = uc32_cpu_exec(cs);
        cpu_exec_end(cs);
        switch (trapnr) {
        case UC32_EXCP_PRIV:
            {
                /* system call */
                get_user_u32(insn, env->regs[31] - 4);
                n = insn & 0xffffff;

                if (n >= UC32_SYSCALL_BASE) {
                    /* linux syscall */
                    n -= UC32_SYSCALL_BASE;
                    if (n == UC32_SYSCALL_NR_set_tls) {
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                    } else {
                        abi_long ret = do_syscall(env,
                                                  n,
                                                  env->regs[0],
                                                  env->regs[1],
                                                  env->regs[2],
                                                  env->regs[3],
                                                  env->regs[4],
                                                  env->regs[5],
                                                  0, 0);
                        if (ret == -TARGET_ERESTARTSYS) {
                            env->regs[31] -= 4;
                        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                            env->regs[0] = ret;
                        }
                    }
                } else {
                    goto error;
                }
            }
            break;
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        default:
            goto error;
        }
        process_pending_signals(env);
    }

error:
    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    abort();
}
#endif
#ifdef TARGET_SPARC
#define SPARC64_STACK_BIAS 2047

//#define DEBUG_WIN

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
    return index;
}
/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    unsigned int i;
    abi_ulong sp_ptr;

    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    if (sp_ptr & 3)
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
}
static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->wim = new_wim;
#else
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->cansave++;
    env->canrestore--;
#endif
}
static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
#endif
    unsigned int i, cwp1;
    abi_ulong sp_ptr;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
#endif

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    if (sp_ptr & 3)
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
#ifdef TARGET_SPARC64
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    env->cansave--;
#else
    env->wim = new_wim;
#endif
}
static void flush_windows(CPUSPARCState *env)
{
    int offset, cwp1;

    offset = 1;
    for(;;) {
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
            break;
#else
        if (env->canrestore == 0)
            break;
        env->cansave++;
        env->canrestore--;
#endif
        save_window_offset(env, cwp1);
        offset++;
    }
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#endif
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
#endif
}
void cpu_loop (CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    int trapnr;
    abi_long ret;
    target_siginfo_t info;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_sparc_exec(cs);
        cpu_exec_end(cs);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {
            cpu_get_psr(env);
        }

        switch (trapnr) {
#ifndef TARGET_SPARC64
        case 0x88:
        case 0x90:
#else
        case 0x110:
        case 0x16d:
#endif
            ret = do_syscall (env, env->gregs[1],
                              env->regwptr[0], env->regwptr[1],
                              env->regwptr[2], env->regwptr[3],
                              env->regwptr[4], env->regwptr[5],
                              0, 0);
            if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
                break;
            }
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
#else
                env->psr |= PSR_CARRY;
#endif
                ret = -ret;
            } else {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
#else
                env->psr &= ~PSR_CARRY;
#endif
            }
            env->regwptr[0] = ret;
            /* next instruction */
            env->pc = env->npc;
            env->npc = env->npc + 4;
            break;
        case 0x83: /* flush windows */
#ifdef TARGET_ABI32
        case 0x103:
#endif
            flush_windows(env);
            /* next instruction */
            env->pc = env->npc;
            env->npc = env->npc + 4;
            break;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
            save_window(env);
            break;
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            break;
        case TT_TFAULT:
        case TT_DFAULT:
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->mmuregs[4];
                queue_signal(env, info.si_signo, &info);
            }
            break;
#else
        case TT_SPILL: /* window overflow */
            save_window(env);
            break;
        case TT_FILL: /* window underflow */
            restore_window(env);
            break;
        case TT_TFAULT:
        case TT_DFAULT:
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                if (trapnr == TT_DFAULT)
                    info._sifields._sigfault._addr = env->dmmuregs[4];
                else
                    info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
                queue_signal(env, info.si_signo, &info);
            }
            break;
#ifndef TARGET_ABI32
        case 0x16e:
            flush_windows(env);
            sparc64_get_context(env);
            break;
        case 0x16f:
            flush_windows(env);
            sparc64_set_context(env);
            break;
#endif
#endif
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case TT_ILL_INSN:
            {
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                info.si_code = TARGET_ILL_ILLOPC;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        default:
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(EXIT_FAILURE);
        }
        process_pending_signals (env);
    }
}

#endif
#ifdef TARGET_PPC
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
__attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;
    int flags;
    int segv = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    start_exclusive();
    mmap_lock();
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
        int stored = 0;

        if (addr == env->reserve_addr) {
            switch (size) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
            case 16: {
                segv = get_user_u64(val, addr);
                if (!segv) {
                    segv = get_user_u64(val2, addr + 8);
                }
                break;
            }
#endif
            default: abort();
            }
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                switch (size) {
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                case 16: {
                    if (val2 == env->reserve_val2) {
                        if (msr_le) {
                            val2 = val;
                            val = env->gpr[reg+1];
                        } else {
                            val2 = env->gpr[reg+1];
                        }
                        segv = put_user_u64(val, addr);
                        if (!segv) {
                            segv = put_user_u64(val2, addr + 8);
                        }
                    }
                    break;
                }
#endif
                default: abort();
                }
                if (!segv) {
                    stored = 1;
                }
            }
        }
        env->crf[0] = (stored << 1) | xer_so;
        env->reserve_addr = (target_ulong)-1;
    }
    if (!segv) {
        env->nip += 4;
    }
    mmap_unlock();
    end_exclusive();
    return segv;
}
void cpu_loop(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;
    int trapnr;
    target_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_ppc_exec(cs);
        cpu_exec_end(cs);
        switch (trapnr) {
        case POWERPC_EXCP_NONE:
            /* Just go on */
            break;
        case POWERPC_EXCP_CRITICAL: /* Critical input */
            cpu_abort(cs, "Critical interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_MCHECK:   /* Machine check exception */
            cpu_abort(cs, "Machine check exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DSI:      /* Data storage exception */
            EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
                      env->spr[SPR_DAR]);
            /* XXX: check this. Seems bugged */
            switch (env->error_code & 0xFF000000) {
            case 0x40000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            case 0x04000000:
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                info.si_code = TARGET_ILL_ILLADR;
                break;
            case 0x08000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_ACCERR;
                break;
            default:
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                          env->error_code);
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            }
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_ISI:      /* Instruction storage exception */
            EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
                      "\n", env->spr[SPR_SRR0]);
            /* XXX: check this */
            switch (env->error_code & 0xFF000000) {
            case 0x40000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            case 0x10000000:
            case 0x08000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_ACCERR;
                break;
            default:
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                          env->error_code);
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            }
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_EXTERNAL: /* External input */
            cpu_abort(cs, "External interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_ALIGN:    /* Alignment exception */
            EXCP_DUMP(env, "Unaligned memory access\n");
            /* XXX: check this */
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_PROGRAM:  /* Program exception */
            /* XXX: check this */
            switch (env->error_code & ~0xF) {
            case POWERPC_EXCP_FP:
                EXCP_DUMP(env, "Floating point program exception\n");
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_FP_OX:
                    info.si_code = TARGET_FPE_FLTOVF;
                    break;
                case POWERPC_EXCP_FP_UX:
                    info.si_code = TARGET_FPE_FLTUND;
                    break;
                case POWERPC_EXCP_FP_ZX:
                case POWERPC_EXCP_FP_VXZDZ:
                    info.si_code = TARGET_FPE_FLTDIV;
                    break;
                case POWERPC_EXCP_FP_XX:
                    info.si_code = TARGET_FPE_FLTRES;
                    break;
                case POWERPC_EXCP_FP_VXSOFT:
                    info.si_code = TARGET_FPE_FLTINV;
                    break;
                case POWERPC_EXCP_FP_VXSNAN:
                case POWERPC_EXCP_FP_VXISI:
                case POWERPC_EXCP_FP_VXIDI:
                case POWERPC_EXCP_FP_VXIMZ:
                case POWERPC_EXCP_FP_VXVC:
                case POWERPC_EXCP_FP_VXSQRT:
                case POWERPC_EXCP_FP_VXCVI:
                    info.si_code = TARGET_FPE_FLTSUB;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
                              env->error_code & 0xF);
                    break;
                }
                break;
            case POWERPC_EXCP_INVAL:
                EXCP_DUMP(env, "Invalid instruction\n");
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_INVAL_INVAL:
                    info.si_code = TARGET_ILL_ILLOPC;
                    break;
                case POWERPC_EXCP_INVAL_LSWX:
                    info.si_code = TARGET_ILL_ILLOPN;
                    break;
                case POWERPC_EXCP_INVAL_SPR:
                    info.si_code = TARGET_ILL_PRVREG;
                    break;
                case POWERPC_EXCP_INVAL_FP:
                    info.si_code = TARGET_ILL_COPROC;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_ILLADR;
                    break;
                }
                break;
            case POWERPC_EXCP_PRIV:
                EXCP_DUMP(env, "Privilege violation\n");
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_PRIV_OPC:
                    info.si_code = TARGET_ILL_PRVOPC;
                    break;
                case POWERPC_EXCP_PRIV_REG:
                    info.si_code = TARGET_ILL_PRVREG;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_PRVOPC;
                    break;
                }
                break;
            case POWERPC_EXCP_TRAP:
                cpu_abort(cs, "Tried to call a TRAP\n");
                break;
            default:
                /* Should not happen ! */
                cpu_abort(cs, "Unknown program exception (%02x)\n",
                          env->error_code);
                break;
            }
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_FPU:      /* Floating-point unavailable exception */
            EXCP_DUMP(env, "No floating point allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_SYSCALL:  /* System call exception */
            cpu_abort(cs, "Syscall exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_APU:      /* Auxiliary processor unavailable */
            EXCP_DUMP(env, "No APU instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_DECR:     /* Decrementer exception */
            cpu_abort(cs, "Decrementer interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_FIT:      /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_WDT:      /* Watchdog timer interrupt */
            cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DTLB:     /* Data TLB error */
            cpu_abort(cs, "Data TLB exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_ITLB:     /* Instruction TLB error */
            cpu_abort(cs, "Instruction TLB exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_SPEU:     /* SPE/embedded floating-point unavail. */
            EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_EFPDI:    /* Embedded floating-point data IRQ */
            cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
            break;
        case POWERPC_EXCP_EFPRI:    /* Embedded floating-point round IRQ */
            cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
            break;
        case POWERPC_EXCP_EPERFM:   /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
            break;
        case POWERPC_EXCP_DOORI:    /* Embedded doorbell interrupt */
            cpu_abort(cs, "Doorbell interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DOORCI:   /* Embedded doorbell critical interrupt */
            cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_RESET:    /* System reset exception */
            cpu_abort(cs, "Reset interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DSEG:     /* Data segment exception */
            cpu_abort(cs, "Data segment exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_ISEG:     /* Instruction segment exception */
            cpu_abort(cs, "Instruction segment exception "
                      "while in user mode. Aborting\n");
            break;
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDECR:    /* Hypervisor decrementer exception */
            cpu_abort(cs, "Hypervisor decrementer interrupt "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_TRACE:    /* Trace exception */
            /* nothing to do:
             * we use this exception to emulate step-by-step execution mode.
             */
            break;
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDSI:     /* Hypervisor data storage exception */
            cpu_abort(cs, "Hypervisor data storage exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HISI:     /* Hypervisor instruction storage excp */
            cpu_abort(cs, "Hypervisor instruction storage exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HDSEG:    /* Hypervisor data segment exception */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HISEG:    /* Hypervisor instruction segment excp */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_VPU:      /* Vector unavailable exception */
            EXCP_DUMP(env, "No Altivec instructions allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_PIT:      /* Programmable interval timer IRQ */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_IO:       /* IO error exception */
            cpu_abort(cs, "IO error exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_RUNM:     /* Run mode exception */
            cpu_abort(cs, "Run mode exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_EMUL:     /* Emulation trap exception */
            cpu_abort(cs, "Emulation trap exception not handled\n");
            break;
        case POWERPC_EXCP_IFTLB:    /* Instruction fetch TLB error */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting");
            break;
        case POWERPC_EXCP_DLTLB:    /* Data load TLB miss */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
                      "Aborting");
            break;
        case POWERPC_EXCP_DSTLB:    /* Data store TLB miss */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
                      "Aborting");
            break;
        case POWERPC_EXCP_FPA:      /* Floating-point assist exception */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
            break;
        case POWERPC_EXCP_IABR:     /* Instruction address breakpoint */
            cpu_abort(cs, "Instruction address breakpoint exception "
                      "not handled\n");
            break;
        case POWERPC_EXCP_SMI:      /* System management interrupt */
            cpu_abort(cs, "System management interrupt while in user mode. "
                      "Not handled\n");
            break;
        case POWERPC_EXCP_THERM:    /* Thermal interrupt */
            cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
                      "Not handled\n");
            break;
        case POWERPC_EXCP_PERFM:    /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
            break;
        case POWERPC_EXCP_VPUA:     /* Vector assist exception */
            cpu_abort(cs, "Vector assist exception not handled\n");
            break;
        case POWERPC_EXCP_SOFTP:    /* Soft patch exception */
            cpu_abort(cs, "Soft patch exception not handled\n");
            break;
        case POWERPC_EXCP_MAINT:    /* Maintenance exception */
            cpu_abort(cs, "Maintenance exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_STOP:     /* stop translation */
            /* We did invalidate the instruction cache. Go on */
            break;
        case POWERPC_EXCP_BRANCH:   /* branch instruction: */
            /* We just stopped because of a branch. Go on */
            break;
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
            /* WARNING:
             * PPC ABI uses overflow flag in cr0 to signal an error
             * in syscalls.
             */
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
                             env->gpr[8], 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                break;
            }
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state.  */
                break;
            }
            if (ret > (target_ulong)(-515)) {
                env->crf[0] |= 0x1;
                ret = -ret;
            }
            env->gpr[3] = ret;
            break;
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        default:
            cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr);
            break;
        }
        process_pending_signals(env);
    }
}
#endif
#ifdef TARGET_MIPS

# ifdef TARGET_ABI_MIPSO32
#  define MIPS_SYS(name, args) args,
static const uint8_t mips_syscall_args[] = {
        MIPS_SYS(sys_syscall    , 8)    /* 4000 */
        MIPS_SYS(sys_exit       , 1)
        MIPS_SYS(sys_fork       , 0)
        MIPS_SYS(sys_read       , 3)
        MIPS_SYS(sys_write      , 3)
        MIPS_SYS(sys_open       , 3)    /* 4005 */
        MIPS_SYS(sys_close      , 1)
        MIPS_SYS(sys_waitpid    , 3)
        MIPS_SYS(sys_creat      , 2)
        MIPS_SYS(sys_link       , 2)
        MIPS_SYS(sys_unlink     , 1)    /* 4010 */
        MIPS_SYS(sys_execve     , 0)
        MIPS_SYS(sys_chdir      , 1)
        MIPS_SYS(sys_time       , 1)
        MIPS_SYS(sys_mknod      , 3)
        MIPS_SYS(sys_chmod      , 2)    /* 4015 */
        MIPS_SYS(sys_lchown     , 3)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_ni_syscall , 0)    /* was sys_stat */
        MIPS_SYS(sys_lseek      , 3)
        MIPS_SYS(sys_getpid     , 0)    /* 4020 */
        MIPS_SYS(sys_mount      , 5)
        MIPS_SYS(sys_umount     , 1)
        MIPS_SYS(sys_setuid     , 1)
        MIPS_SYS(sys_getuid     , 0)
        MIPS_SYS(sys_stime      , 1)    /* 4025 */
        MIPS_SYS(sys_ptrace     , 4)
        MIPS_SYS(sys_alarm      , 1)
        MIPS_SYS(sys_ni_syscall , 0)    /* was sys_fstat */
        MIPS_SYS(sys_pause      , 0)
        MIPS_SYS(sys_utime      , 2)    /* 4030 */
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_access     , 2)
        MIPS_SYS(sys_nice       , 1)
        MIPS_SYS(sys_ni_syscall , 0)    /* 4035 */
        MIPS_SYS(sys_sync       , 0)
        MIPS_SYS(sys_kill       , 2)
        MIPS_SYS(sys_rename     , 2)
        MIPS_SYS(sys_mkdir      , 2)
        MIPS_SYS(sys_rmdir      , 1)    /* 4040 */
        MIPS_SYS(sys_dup        , 1)
        MIPS_SYS(sys_pipe       , 0)
        MIPS_SYS(sys_times      , 1)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_brk        , 1)    /* 4045 */
        MIPS_SYS(sys_setgid     , 1)
        MIPS_SYS(sys_getgid     , 0)
        MIPS_SYS(sys_ni_syscall , 0)    /* was signal(2) */
        MIPS_SYS(sys_geteuid    , 0)
        MIPS_SYS(sys_getegid    , 0)    /* 4050 */
        MIPS_SYS(sys_acct       , 0)
        MIPS_SYS(sys_umount2    , 2)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_ioctl      , 3)
        MIPS_SYS(sys_fcntl      , 3)    /* 4055 */
        MIPS_SYS(sys_ni_syscall , 2)
        MIPS_SYS(sys_setpgid    , 2)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_olduname   , 1)
        MIPS_SYS(sys_umask      , 1)    /* 4060 */
        MIPS_SYS(sys_chroot     , 1)
        MIPS_SYS(sys_ustat      , 2)
        MIPS_SYS(sys_dup2       , 2)
        MIPS_SYS(sys_getppid    , 0)
        MIPS_SYS(sys_getpgrp    , 0)    /* 4065 */
        MIPS_SYS(sys_setsid     , 0)
        MIPS_SYS(sys_sigaction  , 3)
        MIPS_SYS(sys_sgetmask   , 0)
        MIPS_SYS(sys_ssetmask   , 1)
        MIPS_SYS(sys_setreuid   , 2)    /* 4070 */
        MIPS_SYS(sys_setregid   , 2)
        MIPS_SYS(sys_sigsuspend , 0)
        MIPS_SYS(sys_sigpending , 1)
        MIPS_SYS(sys_sethostname, 2)
        MIPS_SYS(sys_setrlimit  , 2)    /* 4075 */
        MIPS_SYS(sys_getrlimit  , 2)
        MIPS_SYS(sys_getrusage  , 2)
        MIPS_SYS(sys_gettimeofday, 2)
        MIPS_SYS(sys_settimeofday, 2)
        MIPS_SYS(sys_getgroups  , 2)    /* 4080 */
        MIPS_SYS(sys_setgroups  , 2)
        MIPS_SYS(sys_ni_syscall , 0)    /* old_select */
        MIPS_SYS(sys_symlink    , 2)
        MIPS_SYS(sys_ni_syscall , 0)    /* was sys_lstat */
        MIPS_SYS(sys_readlink   , 3)    /* 4085 */
        MIPS_SYS(sys_uselib     , 1)
        MIPS_SYS(sys_swapon     , 2)
        MIPS_SYS(sys_reboot     , 3)
        MIPS_SYS(old_readdir    , 3)
        MIPS_SYS(old_mmap       , 6)    /* 4090 */
        MIPS_SYS(sys_munmap     , 2)
        MIPS_SYS(sys_truncate   , 2)
        MIPS_SYS(sys_ftruncate  , 2)
        MIPS_SYS(sys_fchmod     , 2)
        MIPS_SYS(sys_fchown     , 3)    /* 4095 */
        MIPS_SYS(sys_getpriority, 2)
        MIPS_SYS(sys_setpriority, 3)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_statfs     , 2)
        MIPS_SYS(sys_fstatfs    , 2)    /* 4100 */
        MIPS_SYS(sys_ni_syscall , 0)    /* was ioperm(2) */
        MIPS_SYS(sys_socketcall , 2)
        MIPS_SYS(sys_syslog     , 3)
        MIPS_SYS(sys_setitimer  , 3)
        MIPS_SYS(sys_getitimer  , 2)    /* 4105 */
        MIPS_SYS(sys_newstat    , 2)
        MIPS_SYS(sys_newlstat   , 2)
        MIPS_SYS(sys_newfstat   , 2)
        MIPS_SYS(sys_uname      , 1)
        MIPS_SYS(sys_ni_syscall , 0)    /* 4110 was iopl(2) */
        MIPS_SYS(sys_vhangup    , 0)
        MIPS_SYS(sys_ni_syscall , 0)    /* was sys_idle() */
        MIPS_SYS(sys_ni_syscall , 0)    /* was sys_vm86 */
        MIPS_SYS(sys_wait4      , 4)
        MIPS_SYS(sys_swapoff    , 1)    /* 4115 */
        MIPS_SYS(sys_sysinfo    , 1)
        MIPS_SYS(sys_ipc        , 6)
        MIPS_SYS(sys_fsync      , 1)
        MIPS_SYS(sys_sigreturn  , 0)
        MIPS_SYS(sys_clone      , 6)    /* 4120 */
        MIPS_SYS(sys_setdomainname, 2)
        MIPS_SYS(sys_newuname   , 1)
        MIPS_SYS(sys_ni_syscall , 0)    /* sys_modify_ldt */
        MIPS_SYS(sys_adjtimex   , 1)
        MIPS_SYS(sys_mprotect   , 3)    /* 4125 */
        MIPS_SYS(sys_sigprocmask, 3)
        MIPS_SYS(sys_ni_syscall , 0)    /* was create_module */
        MIPS_SYS(sys_init_module, 5)
        MIPS_SYS(sys_delete_module, 1)
        MIPS_SYS(sys_ni_syscall , 0)    /* 4130 was get_kernel_syms */
        MIPS_SYS(sys_quotactl   , 0)
        MIPS_SYS(sys_getpgid    , 1)
        MIPS_SYS(sys_fchdir     , 1)
        MIPS_SYS(sys_bdflush    , 2)
        MIPS_SYS(sys_sysfs      , 3)    /* 4135 */
        MIPS_SYS(sys_personality, 1)
        MIPS_SYS(sys_ni_syscall , 0)    /* for afs_syscall */
        MIPS_SYS(sys_setfsuid   , 1)
        MIPS_SYS(sys_setfsgid   , 1)
        MIPS_SYS(sys_llseek     , 5)    /* 4140 */
        MIPS_SYS(sys_getdents   , 3)
        MIPS_SYS(sys_select     , 5)
        MIPS_SYS(sys_flock      , 2)
        MIPS_SYS(sys_msync      , 3)
        MIPS_SYS(sys_readv      , 3)    /* 4145 */
        MIPS_SYS(sys_writev     , 3)
        MIPS_SYS(sys_cacheflush , 3)
        MIPS_SYS(sys_cachectl   , 3)
        MIPS_SYS(sys_sysmips    , 4)
        MIPS_SYS(sys_ni_syscall , 0)    /* 4150 */
        MIPS_SYS(sys_getsid     , 1)
        MIPS_SYS(sys_fdatasync  , 0)
        MIPS_SYS(sys_sysctl     , 1)
        MIPS_SYS(sys_mlock      , 2)
        MIPS_SYS(sys_munlock    , 2)    /* 4155 */
        MIPS_SYS(sys_mlockall   , 1)
        MIPS_SYS(sys_munlockall , 0)
        MIPS_SYS(sys_sched_setparam, 2)
        MIPS_SYS(sys_sched_getparam, 2)
        MIPS_SYS(sys_sched_setscheduler, 3)     /* 4160 */
        MIPS_SYS(sys_sched_getscheduler, 1)
        MIPS_SYS(sys_sched_yield, 0)
        MIPS_SYS(sys_sched_get_priority_max, 1)
        MIPS_SYS(sys_sched_get_priority_min, 1)
        MIPS_SYS(sys_sched_rr_get_interval, 2)  /* 4165 */
        MIPS_SYS(sys_nanosleep, 2)
        MIPS_SYS(sys_mremap     , 5)
        MIPS_SYS(sys_accept     , 3)
        MIPS_SYS(sys_bind       , 3)
        MIPS_SYS(sys_connect    , 3)    /* 4170 */
        MIPS_SYS(sys_getpeername, 3)
        MIPS_SYS(sys_getsockname, 3)
        MIPS_SYS(sys_getsockopt , 5)
        MIPS_SYS(sys_listen     , 2)
        MIPS_SYS(sys_recv       , 4)    /* 4175 */
        MIPS_SYS(sys_recvfrom   , 6)
        MIPS_SYS(sys_recvmsg    , 3)
        MIPS_SYS(sys_send       , 4)
        MIPS_SYS(sys_sendmsg    , 3)
        MIPS_SYS(sys_sendto     , 6)    /* 4180 */
        MIPS_SYS(sys_setsockopt , 5)
        MIPS_SYS(sys_shutdown   , 2)
        MIPS_SYS(sys_socket     , 3)
        MIPS_SYS(sys_socketpair , 4)
        MIPS_SYS(sys_setresuid  , 3)    /* 4185 */
        MIPS_SYS(sys_getresuid  , 3)
        MIPS_SYS(sys_ni_syscall , 0)    /* was sys_query_module */
        MIPS_SYS(sys_poll       , 3)
        MIPS_SYS(sys_nfsservctl , 3)
        MIPS_SYS(sys_setresgid  , 3)    /* 4190 */
        MIPS_SYS(sys_getresgid  , 3)
        MIPS_SYS(sys_prctl      , 5)
        MIPS_SYS(sys_rt_sigreturn, 0)
        MIPS_SYS(sys_rt_sigaction, 4)
        MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
        MIPS_SYS(sys_rt_sigpending, 2)
        MIPS_SYS(sys_rt_sigtimedwait, 4)
        MIPS_SYS(sys_rt_sigqueueinfo, 3)
        MIPS_SYS(sys_rt_sigsuspend, 0)
        MIPS_SYS(sys_pread64    , 6)    /* 4200 */
        MIPS_SYS(sys_pwrite64   , 6)
        MIPS_SYS(sys_chown      , 3)
        MIPS_SYS(sys_getcwd     , 2)
        MIPS_SYS(sys_capget     , 2)
        MIPS_SYS(sys_capset     , 2)    /* 4205 */
        MIPS_SYS(sys_sigaltstack, 2)
        MIPS_SYS(sys_sendfile   , 4)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_mmap2      , 6)    /* 4210 */
        MIPS_SYS(sys_truncate64 , 4)
        MIPS_SYS(sys_ftruncate64, 4)
        MIPS_SYS(sys_stat64     , 2)
        MIPS_SYS(sys_lstat64    , 2)
        MIPS_SYS(sys_fstat64    , 2)    /* 4215 */
        MIPS_SYS(sys_pivot_root , 2)
        MIPS_SYS(sys_mincore    , 3)
        MIPS_SYS(sys_madvise    , 3)
        MIPS_SYS(sys_getdents64 , 3)
        MIPS_SYS(sys_fcntl64    , 3)    /* 4220 */
        MIPS_SYS(sys_ni_syscall , 0)
        MIPS_SYS(sys_gettid     , 0)
        MIPS_SYS(sys_readahead  , 5)
        MIPS_SYS(sys_setxattr   , 5)
        MIPS_SYS(sys_lsetxattr  , 5)    /* 4225 */
        MIPS_SYS(sys_fsetxattr  , 5)
        MIPS_SYS(sys_getxattr   , 4)
        MIPS_SYS(sys_lgetxattr  , 4)
        MIPS_SYS(sys_fgetxattr  , 4)
        MIPS_SYS(sys_listxattr  , 3)    /* 4230 */
        MIPS_SYS(sys_llistxattr , 3)
        MIPS_SYS(sys_flistxattr , 3)
        MIPS_SYS(sys_removexattr, 2)
        MIPS_SYS(sys_lremovexattr, 2)
        MIPS_SYS(sys_fremovexattr, 2)   /* 4235 */
        MIPS_SYS(sys_tkill      , 2)
        MIPS_SYS(sys_sendfile64 , 5)
        MIPS_SYS(sys_futex      , 6)
        MIPS_SYS(sys_sched_setaffinity, 3)
        MIPS_SYS(sys_sched_getaffinity, 3)      /* 4240 */
        MIPS_SYS(sys_io_setup   , 2)
        MIPS_SYS(sys_io_destroy , 1)
        MIPS_SYS(sys_io_getevents, 5)
        MIPS_SYS(sys_io_submit  , 3)
        MIPS_SYS(sys_io_cancel  , 3)    /* 4245 */
        MIPS_SYS(sys_exit_group , 1)
        MIPS_SYS(sys_lookup_dcookie, 3)
        MIPS_SYS(sys_epoll_create, 1)
        MIPS_SYS(sys_epoll_ctl  , 4)
        MIPS_SYS(sys_epoll_wait , 3)    /* 4250 */
        MIPS_SYS(sys_remap_file_pages, 5)
        MIPS_SYS(sys_set_tid_address, 1)
        MIPS_SYS(sys_restart_syscall, 0)
        MIPS_SYS(sys_fadvise64_64, 7)
        MIPS_SYS(sys_statfs64   , 3)    /* 4255 */
        MIPS_SYS(sys_fstatfs64  , 2)
        MIPS_SYS(sys_timer_create, 3)
        MIPS_SYS(sys_timer_settime, 4)
        MIPS_SYS(sys_timer_gettime, 2)
        MIPS_SYS(sys_timer_getoverrun, 1)       /* 4260 */
        MIPS_SYS(sys_timer_delete, 1)
        MIPS_SYS(sys_clock_settime, 2)
        MIPS_SYS(sys_clock_gettime, 2)
        MIPS_SYS(sys_clock_getres, 2)
        MIPS_SYS(sys_clock_nanosleep, 4)        /* 4265 */
        MIPS_SYS(sys_tgkill     , 3)
        MIPS_SYS(sys_utimes     , 2)
        MIPS_SYS(sys_mbind      , 4)
        MIPS_SYS(sys_ni_syscall , 0)    /* sys_get_mempolicy */
        MIPS_SYS(sys_ni_syscall , 0)    /* 4270 sys_set_mempolicy */
        MIPS_SYS(sys_mq_open    , 4)
        MIPS_SYS(sys_mq_unlink  , 1)
        MIPS_SYS(sys_mq_timedsend, 5)
        MIPS_SYS(sys_mq_timedreceive, 5)
        MIPS_SYS(sys_mq_notify  , 2)    /* 4275 */
        MIPS_SYS(sys_mq_getsetattr, 3)
        MIPS_SYS(sys_ni_syscall , 0)    /* sys_vserver */
        MIPS_SYS(sys_waitid     , 4)
        MIPS_SYS(sys_ni_syscall , 0)    /* available, was setaltroot */
        MIPS_SYS(sys_add_key    , 5)
        MIPS_SYS(sys_request_key, 4)
        MIPS_SYS(sys_keyctl     , 5)
        MIPS_SYS(sys_set_thread_area, 1)
        MIPS_SYS(sys_inotify_init, 0)
        MIPS_SYS(sys_inotify_add_watch, 3)      /* 4285 */
        MIPS_SYS(sys_inotify_rm_watch, 2)
        MIPS_SYS(sys_migrate_pages, 4)
        MIPS_SYS(sys_openat, 4)
        MIPS_SYS(sys_mkdirat, 3)
        MIPS_SYS(sys_mknodat, 4)        /* 4290 */
        MIPS_SYS(sys_fchownat, 5)
        MIPS_SYS(sys_futimesat, 3)
        MIPS_SYS(sys_fstatat64, 4)
        MIPS_SYS(sys_unlinkat, 3)
        MIPS_SYS(sys_renameat, 4)       /* 4295 */
        MIPS_SYS(sys_linkat, 5)
        MIPS_SYS(sys_symlinkat, 3)
        MIPS_SYS(sys_readlinkat, 4)
        MIPS_SYS(sys_fchmodat, 3)
        MIPS_SYS(sys_faccessat, 3)      /* 4300 */
        MIPS_SYS(sys_pselect6, 6)
        MIPS_SYS(sys_ppoll, 5)
        MIPS_SYS(sys_unshare, 1)
        MIPS_SYS(sys_splice, 6)
        MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
        MIPS_SYS(sys_tee, 4)
        MIPS_SYS(sys_vmsplice, 4)
        MIPS_SYS(sys_move_pages, 6)
        MIPS_SYS(sys_set_robust_list, 2)
        MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
        MIPS_SYS(sys_kexec_load, 4)
        MIPS_SYS(sys_getcpu, 3)
        MIPS_SYS(sys_epoll_pwait, 6)
        MIPS_SYS(sys_ioprio_set, 3)
        MIPS_SYS(sys_ioprio_get, 2)
        MIPS_SYS(sys_utimensat, 4)
        MIPS_SYS(sys_signalfd, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was timerfd */
        MIPS_SYS(sys_eventfd, 1)
        MIPS_SYS(sys_fallocate, 6)      /* 4320 */
        MIPS_SYS(sys_timerfd_create, 2)
        MIPS_SYS(sys_timerfd_gettime, 2)
        MIPS_SYS(sys_timerfd_settime, 4)
        MIPS_SYS(sys_signalfd4, 4)
        MIPS_SYS(sys_eventfd2, 2)       /* 4325 */
        MIPS_SYS(sys_epoll_create1, 1)
        MIPS_SYS(sys_dup3, 3)
        MIPS_SYS(sys_pipe2, 2)
        MIPS_SYS(sys_inotify_init1, 1)
        MIPS_SYS(sys_preadv, 6)         /* 4330 */
        MIPS_SYS(sys_pwritev, 6)
        MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
        MIPS_SYS(sys_perf_event_open, 5)
        MIPS_SYS(sys_accept4, 4)
        MIPS_SYS(sys_recvmmsg, 5)       /* 4335 */
        MIPS_SYS(sys_fanotify_init, 2)
        MIPS_SYS(sys_fanotify_mark, 6)
        MIPS_SYS(sys_prlimit64, 4)
        MIPS_SYS(sys_name_to_handle_at, 5)
        MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
        MIPS_SYS(sys_clock_adjtime, 2)
        MIPS_SYS(sys_syncfs, 1)
};
#  undef MIPS_SYS
# endif /* O32 */
static int do_store_exclusive(CPUMIPSState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val;
    int flags;
    int segv = 0;
    int reg;
    int d;

    addr = env->lladdr;
    page_addr = addr & TARGET_PAGE_MASK;
    start_exclusive();
    mmap_lock();
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
        if (d) {
            segv = get_user_s64(val, addr);
        } else {
            segv = get_user_s32(val, addr);
        }
        if (!segv) {
            if (val != env->llval) {
                env->active_tc.gpr[reg] = 0;
            } else {
                if (d) {
                    segv = put_user_u64(env->llnewval, addr);
                } else {
                    segv = put_user_u32(env->llnewval, addr);
                }
                if (!segv) {
                    env->active_tc.gpr[reg] = 1;
                }
            }
        }
    }
    env->lladdr = -1;
    if (!segv) {
        env->active_tc.PC += 4;
    }
    mmap_unlock();
    end_exclusive();
    return segv;
}
/* Break codes */
enum {
    BRK_OVERFLOW = 6,
    BRK_DIVZERO = 7
};

static int do_break(CPUMIPSState *env, target_siginfo_t *info,
                    unsigned int code)
{
    int ret = -1;

    switch (code) {
    case BRK_OVERFLOW:
    case BRK_DIVZERO:
        info->si_signo = TARGET_SIGFPE;
        info->si_errno = 0;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, &*info);
        ret = 0;
        break;
    default:
        info->si_signo = TARGET_SIGTRAP;
        info->si_errno = 0;
        queue_signal(env, info->si_signo, &*info);
        ret = 0;
        break;
    }

    return ret;
}
void cpu_loop(CPUMIPSState *env)

    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;

# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;

        trapnr = cpu_mips_exec(cs);

            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;

                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;

                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];

                /* these arguments are taken from the stack */

                    if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {

                    if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {

                    if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {

                    if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {

                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
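            /* When TARGET_ABI_MIPSO32 is not defined, every syscall argument
             * fits in a register, so a0-a7 (gpr[4]..gpr[11]) are passed
             * straight through and no stack reads are needed. */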
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
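            /* Syscall return handling: -TARGET_ERESTARTSYS rewinds the PC so
             * the syscall is re-issued after signal handling, and
             * -TARGET_QEMU_ESIGRETURN leaves the registers exactly as
             * sigreturn restored them.  Otherwise the MIPS convention applies:
             * a3 (gpr[7]) flags an error when the value lies in the errno
             * range, and v0 (gpr[2]) receives the result. */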
            if (ret == -TARGET_ERESTARTSYS) {
                env->active_tc.PC -= 4;

            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state. */

            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */

                env->active_tc.gpr[7] = 0; /* error flag */

            env->active_tc.gpr[2] = ret;
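            /* Address and TLB faults are forwarded to the guest as SIGSEGV
             * with the faulting address taken from CP0_BadVAddr; unavailable
             * or reserved instructions are reported as SIGILL. */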
            info.si_signo = TARGET_SIGSEGV;

            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);

            info.si_signo = TARGET_SIGILL;

            queue_signal(env, info.si_signo, &info);

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);

                    info.si_signo = sig;

                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
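        /* EXCP_SC is raised when the guest executes SC: do_store_exclusive()
         * finishes the conditional store, and a fault while touching guest
         * memory is reported as SIGSEGV at the current PC. */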
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;

                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);

            info.si_signo = TARGET_SIGILL;

            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
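        /* For BREAK the instruction is re-read from guest memory to recover
         * its code field; the decode below covers the 16- and 32-bit
         * microMIPS encodings as well as the standard 32-bit one. */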
        /* The code below was inspired by the MIPS Linux kernel trap
         * handling code in arch/mips/kernel/traps.c.
         */

                abi_ulong trap_instr;
                unsigned int code;

                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);

                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;

                            /* 32-bit instruction */

                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);

                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug... */
                            if (code >= (1 << 10)) {

                        ret = get_user_u16(trap_instr, env->active_tc.PC);

                        code = (trap_instr >> 6) & 0x3f;

                    ret = get_user_u32(trap_instr, env->active_tc.PC);

                    /* As described in the original Linux kernel code, the
                     * below checks on 'code' are to work around an old
                     * assembler bug. */
                    code = ((trap_instr >> 6) & ((1 << 20) - 1));
                    if (code >= (1 << 10)) {

                if (do_break(env, &info, code) != 0) {
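        /* Trap instructions are handled the same way: fetch the instruction,
         * extract the trap code when the encoding carries one (the immediate
         * forms do not), and hand it to do_break(). */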
                abi_ulong trap_instr;
                unsigned int code = 0;

                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */

                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);

                    trap_instr = (instr[0] << 16) | instr[1];

                    ret = get_user_u32(trap_instr, env->active_tc.PC);

                /* The immediate versions don't provide a code. */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));

                        code = ((trap_instr >> 6) & ((1 << 10) - 1));

                if (do_break(env, &info, code) != 0) {
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);

        process_pending_signals(env);
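/* The remaining cpu_loop() variants follow the same shape for the other
 * targets: run translated code with the target's exec function, turn the
 * resulting exception into a syscall or a queued guest signal, and finish
 * each iteration with process_pending_signals(). */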
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)

    CPUState *cs = CPU(openrisc_env_get_cpu(env));

        trapnr = cpu_openrisc_exec(cs);

            qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);

            qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;

            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;

            qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);

            qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;

            qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGILL;

            qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt pc is %#x\n", env->pc);

            qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");

            qemu_log_mask(CPU_LOG_INT, "\nRange\n");
            gdbsig = TARGET_SIGSEGV;

            env->pc += 4; /* 0xc00; */
            ret = do_syscall(env,
                             env->gpr[11], /* return value */
                             env->gpr[3],  /* r3 - r7 are params */

            if (ret == -TARGET_ERESTARTSYS) {

            } else if (ret != -TARGET_QEMU_ESIGRETURN) {

            qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");

            qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
            gdbsig = TARGET_SIGTRAP;

            qemu_log_mask(CPU_LOG_INT, "\nNR\n");

            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
                      trapnr);
            gdbsig = TARGET_SIGILL;

            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {

        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
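/* SuperH: syscall results are returned in r0 (gregs[0]), and TLB faults are
 * delivered as SIGSEGV with the faulting address taken from the TEA
 * register. */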
void cpu_loop(CPUSH4State *env)

    CPUState *cs = CPU(sh_env_get_cpu(env));

    target_siginfo_t info;

        trapnr = cpu_sh4_exec(cs);

            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {

            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->gregs[0] = ret;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);

                    info.si_signo = sig;

                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

            info.si_signo = TARGET_SIGSEGV;

            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);

            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals (env);
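/* CRIS: syscall results land in r10, and a memory fault is reported as
 * SIGSEGV with the faulting address read from the PR_EDA special register. */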
void cpu_loop(CPUCRISState *env)

    CPUState *cs = CPU(cris_env_get_cpu(env));

    target_siginfo_t info;

        trapnr = cpu_cris_exec(cs);

            info.si_signo = TARGET_SIGSEGV;

            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {

            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[10] = ret;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);

                    info.si_signo = sig;

                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals (env);
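/* MicroBlaze: the syscall return path mirrors the kernel's "rtbd" exit, so
 * r14 is kept equal to the PC being returned to; hardware exceptions are
 * decoded from SR_ESR (and SR_FSR for FPU faults) into the matching SIGFPE
 * codes. */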
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)

    CPUState *cs = CPU(mb_env_get_cpu(env));

    target_siginfo_t info;

        trapnr = cpu_mb_exec(cs);

            info.si_signo = TARGET_SIGSEGV;

            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            /* Return address is 4 bytes after the call. */

            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {

            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it
             */

            env->regs[14] = env->sregs[SR_PC];

            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well. */

            env->iflags &= ~(IMM_FLAG | D_FLAG);

            switch (env->sregs[SR_ESR] & 31) {
                case ESR_EC_DIVZERO:
                    info.si_signo = TARGET_SIGFPE;

                    info.si_code = TARGET_FPE_FLTDIV;
                    info._sifields._sigfault._addr = 0;
                    queue_signal(env, info.si_signo, &info);

                    info.si_signo = TARGET_SIGFPE;

                    if (env->sregs[SR_FSR] & FSR_IO) {
                        info.si_code = TARGET_FPE_FLTINV;

                    if (env->sregs[SR_FSR] & FSR_DZ) {
                        info.si_code = TARGET_FPE_FLTDIV;

                    info._sifields._sigfault._addr = 0;
                    queue_signal(env, info.si_signo, &info);

                    printf ("Unhandled hw-exception: 0x%x\n",
                            env->sregs[SR_ESR] & ESR_EC_MASK);
                    cpu_dump_state(cs, stderr, fprintf, 0);

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);

                    info.si_signo = sig;

                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals (env);
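/* m68k: besides ordinary syscalls (results returned in d0), this loop also
 * services the simulator-call interface used when ts->sim_syscalls is set and
 * the semihosting HALT instruction. */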
void cpu_loop(CPUM68KState *env)

    CPUState *cs = CPU(m68k_env_get_cpu(env));

    target_siginfo_t info;
    TaskState *ts = cs->opaque;

        trapnr = cpu_m68k_exec(cs);

            if (ts->sim_syscalls) {

                get_user_u16(nr, env->pc + 2);

                do_m68k_simcall(env, nr);

        case EXCP_HALT_INSN:
            /* Semihosting syscall. */

                do_m68k_semihosting(env, env->dregs[0]);

        case EXCP_UNSUPPORTED:

            info.si_signo = TARGET_SIGILL;

            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);

            ts->sim_syscalls = 0;

            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {

            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->dregs[0] = ret;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            info.si_signo = TARGET_SIGSEGV;

            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);

                    info.si_signo = sig;

                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);

        process_pending_signals(env);

#endif /* TARGET_M68K */
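/* The Alpha variant of do_store_exclusive() plays the same role as the MIPS
 * one: it re-reads the locked location (32- or 64-bit depending on 'quad'),
 * performs the store only if the value still matches lock_value, and raises
 * SIGSEGV at the faulting address when the guest memory access fails. */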
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)

    target_ulong addr, val, tmp;
    target_siginfo_t info;

    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;

        if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {

        if (val == env->lock_value) {

            if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {

    info.si_signo = TARGET_SIGSEGV;

    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);

void cpu_loop(CPUAlphaState *env)

    CPUState *cs = CPU(alpha_env_get_cpu(env));

    target_siginfo_t info;

        trapnr = cpu_alpha_exec(cs);