4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
26 #include <sys/syscall.h>
27 #include <sys/resource.h>
30 #include "qemu-common.h"
31 #include "qemu/cache-utils.h"
34 #include "qemu/timer.h"
35 #include "qemu/envlist.h"
/* CPU model name selected on the command line (-cpu); NULL = default. */
45 static const char *cpu_model
;
/* Lowest guest address we allow to be mapped; mirrors the host's
 * /proc/sys/vm/mmap_min_addr policy. */
46 unsigned long mmap_min_addr
;
47 #if defined(CONFIG_USE_GUEST_BASE)
/* Offset added to guest virtual addresses to form host addresses. */
48 unsigned long guest_base
;
50 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
52 * When running 32-on-64 we should make sure we can fit all of the possible
53 * guest address space into a contiguous chunk of virtual host memory.
55 * This way we will never overlap with our own libraries or binaries or stack
56 * or anything else that QEMU maps.
59 /* MIPS only supports 31 bits of virtual address space for user space */
60 unsigned long reserved_va
= 0x77000000;
62 unsigned long reserved_va
= 0xf7000000;
65 unsigned long reserved_va
;
69 static void usage(void);
/* Prefix prepended when looking up the guest's ELF interpreter. */
71 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
/* Kernel release string reported to the guest by uname(2). */
72 const char *qemu_uname_release
= CONFIG_UNAME_RELEASE
;
74 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
75 we allocate a bigger stack. Need a better solution, for example
76 by remapping the process stack directly at the right place */
77 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
/*
 * gemu_log: printf-style diagnostic logging for the user-mode emulator.
 * Output goes to stderr so it never mixes with the guest's stdout.
 */
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
88 #if defined(TARGET_I386)
89 int cpu_get_pic_interrupt(CPUX86State
*env
)
95 /***********************************************************/
96 /* Helper routines for implementing atomic operations. */
98 /* To implement exclusive operations we force all cpus to syncronise.
99 We don't require a full sync, only that no cpus are executing guest code.
100 The alternative is to map target atomic ops onto host equivalents,
101 which requires quite a lot of per host/target work. */
/* Protects the global CPU list (see cpu_list_lock/cpu_list_unlock). */
102 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
/* Serialises exclusive sections against normal guest execution. */
103 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
/* Signalled when the last CPU still running guest code stops. */
104 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
/* Broadcast when an exclusive section finishes, releasing waiters. */
105 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
/* Non-zero while an exclusive operation is in progress; counts CPUs
 * that must still stop (must be accessed with exclusive_lock held). */
106 static int pending_cpus
;
108 /* Make sure everything is in a consistent state for calling fork(). */
109 void fork_start(void)
111 pthread_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
112 pthread_mutex_lock(&exclusive_lock
);
116 void fork_end(int child
)
118 mmap_fork_end(child
);
120 CPUState
*cpu
, *next_cpu
;
121 /* Child processes created by fork() only have a single thread.
122 Discard information about the parent threads. */
123 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
124 if (cpu
!= thread_cpu
) {
125 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
129 pthread_mutex_init(&exclusive_lock
, NULL
);
130 pthread_mutex_init(&cpu_list_mutex
, NULL
);
131 pthread_cond_init(&exclusive_cond
, NULL
);
132 pthread_cond_init(&exclusive_resume
, NULL
);
133 pthread_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
, NULL
);
134 gdbserver_fork((CPUArchState
*)thread_cpu
->env_ptr
);
136 pthread_mutex_unlock(&exclusive_lock
);
137 pthread_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
141 /* Wait for pending exclusive operations to complete. The exclusive lock
143 static inline void exclusive_idle(void)
145 while (pending_cpus
) {
146 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
150 /* Start an exclusive operation.
151 Must only be called from outside cpu_arm_exec. */
152 static inline void start_exclusive(void)
156 pthread_mutex_lock(&exclusive_lock
);
160 /* Make all other cpus stop executing. */
161 CPU_FOREACH(other_cpu
) {
162 if (other_cpu
->running
) {
167 if (pending_cpus
> 1) {
168 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
172 /* Finish an exclusive operation. */
173 static inline void end_exclusive(void)
176 pthread_cond_broadcast(&exclusive_resume
);
177 pthread_mutex_unlock(&exclusive_lock
);
180 /* Wait for exclusive ops to finish, and begin cpu execution. */
181 static inline void cpu_exec_start(CPUState
*cpu
)
183 pthread_mutex_lock(&exclusive_lock
);
186 pthread_mutex_unlock(&exclusive_lock
);
189 /* Mark cpu as not executing, and release pending exclusive ops. */
190 static inline void cpu_exec_end(CPUState
*cpu
)
192 pthread_mutex_lock(&exclusive_lock
);
193 cpu
->running
= false;
194 if (pending_cpus
> 1) {
196 if (pending_cpus
== 1) {
197 pthread_cond_signal(&exclusive_cond
);
201 pthread_mutex_unlock(&exclusive_lock
);
204 void cpu_list_lock(void)
206 pthread_mutex_lock(&cpu_list_mutex
);
209 void cpu_list_unlock(void)
211 pthread_mutex_unlock(&cpu_list_mutex
);
216 /***********************************************************/
217 /* CPUX86 core interface */
219 void cpu_smm_update(CPUX86State
*env
)
223 uint64_t cpu_get_tsc(CPUX86State
*env
)
225 return cpu_get_real_ticks();
/* Encode an 8-byte segment descriptor at *ptr from base address,
 * limit and type/flag bits, stored in guest byte order. */
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    /* Low word: base[15:0] in the high half, limit[15:0] in the low half. */
    e1 = (addr << 16) | (limit & 0xffff);
    /* High word: base[23:16], base[31:24], limit[19:16], plus flags. */
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* Guest interrupt descriptor table; allocated and installed during CPU
 * setup elsewhere in this file. */
241 static uint64_t *idt_table
;
243 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
244 uint64_t addr
, unsigned int sel
)
247 e1
= (addr
& 0xffff) | (sel
<< 16);
248 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
252 p
[2] = tswap32(addr
>> 32);
255 /* only dpl matters as we do only user space emulation */
256 static void set_idt(int n
, unsigned int dpl
)
258 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
261 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
262 uint32_t addr
, unsigned int sel
)
265 e1
= (addr
& 0xffff) | (sel
<< 16);
266 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
272 /* only dpl matters as we do only user space emulation */
273 static void set_idt(int n
, unsigned int dpl
)
275 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
279 void cpu_loop(CPUX86State
*env
)
281 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
284 target_siginfo_t info
;
287 trapnr
= cpu_x86_exec(env
);
290 /* linux syscall from int $0x80 */
291 env
->regs
[R_EAX
] = do_syscall(env
,
303 /* linux syscall from syscall instruction */
304 env
->regs
[R_EAX
] = do_syscall(env
,
313 env
->eip
= env
->exception_next_eip
;
318 info
.si_signo
= SIGBUS
;
320 info
.si_code
= TARGET_SI_KERNEL
;
321 info
._sifields
._sigfault
._addr
= 0;
322 queue_signal(env
, info
.si_signo
, &info
);
325 /* XXX: potential problem if ABI32 */
326 #ifndef TARGET_X86_64
327 if (env
->eflags
& VM_MASK
) {
328 handle_vm86_fault(env
);
332 info
.si_signo
= SIGSEGV
;
334 info
.si_code
= TARGET_SI_KERNEL
;
335 info
._sifields
._sigfault
._addr
= 0;
336 queue_signal(env
, info
.si_signo
, &info
);
340 info
.si_signo
= SIGSEGV
;
342 if (!(env
->error_code
& 1))
343 info
.si_code
= TARGET_SEGV_MAPERR
;
345 info
.si_code
= TARGET_SEGV_ACCERR
;
346 info
._sifields
._sigfault
._addr
= env
->cr
[2];
347 queue_signal(env
, info
.si_signo
, &info
);
350 #ifndef TARGET_X86_64
351 if (env
->eflags
& VM_MASK
) {
352 handle_vm86_trap(env
, trapnr
);
356 /* division by zero */
357 info
.si_signo
= SIGFPE
;
359 info
.si_code
= TARGET_FPE_INTDIV
;
360 info
._sifields
._sigfault
._addr
= env
->eip
;
361 queue_signal(env
, info
.si_signo
, &info
);
366 #ifndef TARGET_X86_64
367 if (env
->eflags
& VM_MASK
) {
368 handle_vm86_trap(env
, trapnr
);
372 info
.si_signo
= SIGTRAP
;
374 if (trapnr
== EXCP01_DB
) {
375 info
.si_code
= TARGET_TRAP_BRKPT
;
376 info
._sifields
._sigfault
._addr
= env
->eip
;
378 info
.si_code
= TARGET_SI_KERNEL
;
379 info
._sifields
._sigfault
._addr
= 0;
381 queue_signal(env
, info
.si_signo
, &info
);
386 #ifndef TARGET_X86_64
387 if (env
->eflags
& VM_MASK
) {
388 handle_vm86_trap(env
, trapnr
);
392 info
.si_signo
= SIGSEGV
;
394 info
.si_code
= TARGET_SI_KERNEL
;
395 info
._sifields
._sigfault
._addr
= 0;
396 queue_signal(env
, info
.si_signo
, &info
);
400 info
.si_signo
= SIGILL
;
402 info
.si_code
= TARGET_ILL_ILLOPN
;
403 info
._sifields
._sigfault
._addr
= env
->eip
;
404 queue_signal(env
, info
.si_signo
, &info
);
407 /* just indicate that signals should be handled asap */
413 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
418 info
.si_code
= TARGET_TRAP_BRKPT
;
419 queue_signal(env
, info
.si_signo
, &info
);
424 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
425 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
429 process_pending_signals(env
);
436 #define get_user_code_u32(x, gaddr, doswap) \
437 ({ abi_long __r = get_user_u32((x), (gaddr)); \
438 if (!__r && (doswap)) { \
444 #define get_user_code_u16(x, gaddr, doswap) \
445 ({ abi_long __r = get_user_u16((x), (gaddr)); \
446 if (!__r && (doswap)) { \
453 /* Commpage handling -- there is no commpage for AArch64 */
456 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
458 * r0 = pointer to oldval
459 * r1 = pointer to newval
460 * r2 = pointer to target value
463 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
464 * C set if *ptr was changed, clear if no exchange happened
466 * Note segv's in kernel helpers are a bit tricky, we can set the
467 * data address sensibly but the PC address is just the entry point.
469 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
471 uint64_t oldval
, newval
, val
;
473 target_siginfo_t info
;
475 /* Based on the 32 bit code in do_kernel_trap */
477 /* XXX: This only works between threads, not between processes.
478 It's probably possible to implement this with native host
479 operations. However things like ldrex/strex are much harder so
480 there's not much point trying. */
482 cpsr
= cpsr_read(env
);
485 if (get_user_u64(oldval
, env
->regs
[0])) {
486 env
->cp15
.c6_data
= env
->regs
[0];
490 if (get_user_u64(newval
, env
->regs
[1])) {
491 env
->cp15
.c6_data
= env
->regs
[1];
495 if (get_user_u64(val
, addr
)) {
496 env
->cp15
.c6_data
= addr
;
503 if (put_user_u64(val
, addr
)) {
504 env
->cp15
.c6_data
= addr
;
514 cpsr_write(env
, cpsr
, CPSR_C
);
520 /* We get the PC of the entry address - which is as good as anything,
521 on a real kernel what you get depends on which mode it uses. */
522 info
.si_signo
= SIGSEGV
;
524 /* XXX: check env->error_code */
525 info
.si_code
= TARGET_SEGV_MAPERR
;
526 info
._sifields
._sigfault
._addr
= env
->cp15
.c6_data
;
527 queue_signal(env
, info
.si_signo
, &info
);
532 /* Handle a jump to the kernel code page. */
534 do_kernel_trap(CPUARMState
*env
)
540 switch (env
->regs
[15]) {
541 case 0xffff0fa0: /* __kernel_memory_barrier */
542 /* ??? No-op. Will need to do better for SMP. */
544 case 0xffff0fc0: /* __kernel_cmpxchg */
545 /* XXX: This only works between threads, not between processes.
546 It's probably possible to implement this with native host
547 operations. However things like ldrex/strex are much harder so
548 there's not much point trying. */
550 cpsr
= cpsr_read(env
);
552 /* FIXME: This should SEGV if the access fails. */
553 if (get_user_u32(val
, addr
))
555 if (val
== env
->regs
[0]) {
557 /* FIXME: Check for segfaults. */
558 put_user_u32(val
, addr
);
565 cpsr_write(env
, cpsr
, CPSR_C
);
568 case 0xffff0fe0: /* __kernel_get_tls */
569 env
->regs
[0] = env
->cp15
.tpidrro_el0
;
571 case 0xffff0f60: /* __kernel_cmpxchg64 */
572 arm_kernel_cmpxchg64_helper(env
);
578 /* Jump back to the caller. */
579 addr
= env
->regs
[14];
584 env
->regs
[15] = addr
;
589 /* Store exclusive handling for AArch32 */
590 static int do_strex(CPUARMState
*env
)
598 if (env
->exclusive_addr
!= env
->exclusive_test
) {
601 /* We know we're always AArch32 so the address is in uint32_t range
602 * unless it was the -1 exclusive-monitor-lost value (which won't
603 * match exclusive_test above).
605 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
606 addr
= env
->exclusive_addr
;
607 size
= env
->exclusive_info
& 0xf;
610 segv
= get_user_u8(val
, addr
);
613 segv
= get_user_u16(val
, addr
);
617 segv
= get_user_u32(val
, addr
);
623 env
->cp15
.c6_data
= addr
;
628 segv
= get_user_u32(valhi
, addr
+ 4);
630 env
->cp15
.c6_data
= addr
+ 4;
633 val
= deposit64(val
, 32, 32, valhi
);
635 if (val
!= env
->exclusive_val
) {
639 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
642 segv
= put_user_u8(val
, addr
);
645 segv
= put_user_u16(val
, addr
);
649 segv
= put_user_u32(val
, addr
);
653 env
->cp15
.c6_data
= addr
;
657 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
658 segv
= put_user_u32(val
, addr
+ 4);
660 env
->cp15
.c6_data
= addr
+ 4;
667 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
673 void cpu_loop(CPUARMState
*env
)
675 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
677 unsigned int n
, insn
;
678 target_siginfo_t info
;
683 trapnr
= cpu_arm_exec(env
);
688 TaskState
*ts
= cs
->opaque
;
692 /* we handle the FPU emulation here, as Linux */
693 /* we get the opcode */
694 /* FIXME - what to do if get_user() fails? */
695 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
697 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
698 if (rc
== 0) { /* illegal instruction */
699 info
.si_signo
= SIGILL
;
701 info
.si_code
= TARGET_ILL_ILLOPN
;
702 info
._sifields
._sigfault
._addr
= env
->regs
[15];
703 queue_signal(env
, info
.si_signo
, &info
);
704 } else if (rc
< 0) { /* FP exception */
707 /* translate softfloat flags to FPSR flags */
708 if (-rc
& float_flag_invalid
)
710 if (-rc
& float_flag_divbyzero
)
712 if (-rc
& float_flag_overflow
)
714 if (-rc
& float_flag_underflow
)
716 if (-rc
& float_flag_inexact
)
719 FPSR fpsr
= ts
->fpa
.fpsr
;
720 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
722 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
723 info
.si_signo
= SIGFPE
;
726 /* ordered by priority, least first */
727 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
728 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
729 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
730 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
731 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
733 info
._sifields
._sigfault
._addr
= env
->regs
[15];
734 queue_signal(env
, info
.si_signo
, &info
);
739 /* accumulate unenabled exceptions */
740 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
742 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
744 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
746 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
748 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
751 } else { /* everything OK */
762 if (trapnr
== EXCP_BKPT
) {
764 /* FIXME - what to do if get_user() fails? */
765 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
769 /* FIXME - what to do if get_user() fails? */
770 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
771 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
776 /* FIXME - what to do if get_user() fails? */
777 get_user_code_u16(insn
, env
->regs
[15] - 2,
781 /* FIXME - what to do if get_user() fails? */
782 get_user_code_u32(insn
, env
->regs
[15] - 4,
788 if (n
== ARM_NR_cacheflush
) {
790 } else if (n
== ARM_NR_semihosting
791 || n
== ARM_NR_thumb_semihosting
) {
792 env
->regs
[0] = do_arm_semihosting (env
);
793 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
795 if (env
->thumb
|| n
== 0) {
798 n
-= ARM_SYSCALL_BASE
;
801 if ( n
> ARM_NR_BASE
) {
803 case ARM_NR_cacheflush
:
807 cpu_set_tls(env
, env
->regs
[0]);
811 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
813 env
->regs
[0] = -TARGET_ENOSYS
;
817 env
->regs
[0] = do_syscall(env
,
833 /* just indicate that signals should be handled asap */
835 case EXCP_PREFETCH_ABORT
:
836 addr
= env
->cp15
.c6_insn
;
838 case EXCP_DATA_ABORT
:
839 addr
= env
->cp15
.c6_data
;
842 info
.si_signo
= SIGSEGV
;
844 /* XXX: check env->error_code */
845 info
.si_code
= TARGET_SEGV_MAPERR
;
846 info
._sifields
._sigfault
._addr
= addr
;
847 queue_signal(env
, info
.si_signo
, &info
);
854 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
859 info
.si_code
= TARGET_TRAP_BRKPT
;
860 queue_signal(env
, info
.si_signo
, &info
);
864 case EXCP_KERNEL_TRAP
:
865 if (do_kernel_trap(env
))
870 addr
= env
->cp15
.c6_data
;
876 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
878 cpu_dump_state(cs
, stderr
, fprintf
, 0);
881 process_pending_signals(env
);
888 * Handle AArch64 store-release exclusive
890 * rs = gets the status result of store exclusive
891 * rt = is the register that is stored
892 * rt2 = is the second register store (in STP)
895 static int do_strex_a64(CPUARMState
*env
)
906 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
907 size
= extract32(env
->exclusive_info
, 0, 2);
908 is_pair
= extract32(env
->exclusive_info
, 2, 1);
909 rs
= extract32(env
->exclusive_info
, 4, 5);
910 rt
= extract32(env
->exclusive_info
, 9, 5);
911 rt2
= extract32(env
->exclusive_info
, 14, 5);
913 addr
= env
->exclusive_addr
;
915 if (addr
!= env
->exclusive_test
) {
921 segv
= get_user_u8(val
, addr
);
924 segv
= get_user_u16(val
, addr
);
927 segv
= get_user_u32(val
, addr
);
930 segv
= get_user_u64(val
, addr
);
936 env
->cp15
.c6_data
= addr
;
939 if (val
!= env
->exclusive_val
) {
944 segv
= get_user_u32(val
, addr
+ 4);
946 segv
= get_user_u64(val
, addr
+ 8);
949 env
->cp15
.c6_data
= addr
+ (size
== 2 ?
4 : 8);
952 if (val
!= env
->exclusive_high
) {
956 /* handle the zero register */
957 val
= rt
== 31 ?
0 : env
->xregs
[rt
];
960 segv
= put_user_u8(val
, addr
);
963 segv
= put_user_u16(val
, addr
);
966 segv
= put_user_u32(val
, addr
);
969 segv
= put_user_u64(val
, addr
);
976 /* handle the zero register */
977 val
= rt2
== 31 ?
0 : env
->xregs
[rt2
];
979 segv
= put_user_u32(val
, addr
+ 4);
981 segv
= put_user_u64(val
, addr
+ 8);
984 env
->cp15
.c6_data
= addr
+ (size
== 2 ?
4 : 8);
991 /* rs == 31 encodes a write to the ZR, thus throwing away
992 * the status return. This is rather silly but valid.
998 /* instruction faulted, PC does not advance */
999 /* either way a strex releases any exclusive lock we have */
1000 env
->exclusive_addr
= -1;
1005 /* AArch64 main loop */
1006 void cpu_loop(CPUARMState
*env
)
1008 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1010 target_siginfo_t info
;
1015 trapnr
= cpu_arm_exec(env
);
1020 env
->xregs
[0] = do_syscall(env
,
1030 case EXCP_INTERRUPT
:
1031 /* just indicate that signals should be handled asap */
1034 info
.si_signo
= SIGILL
;
1036 info
.si_code
= TARGET_ILL_ILLOPN
;
1037 info
._sifields
._sigfault
._addr
= env
->pc
;
1038 queue_signal(env
, info
.si_signo
, &info
);
1040 case EXCP_PREFETCH_ABORT
:
1041 addr
= env
->cp15
.c6_insn
;
1043 case EXCP_DATA_ABORT
:
1044 addr
= env
->cp15
.c6_data
;
1046 info
.si_signo
= SIGSEGV
;
1048 /* XXX: check env->error_code */
1049 info
.si_code
= TARGET_SEGV_MAPERR
;
1050 info
._sifields
._sigfault
._addr
= addr
;
1051 queue_signal(env
, info
.si_signo
, &info
);
1055 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1057 info
.si_signo
= sig
;
1059 info
.si_code
= TARGET_TRAP_BRKPT
;
1060 queue_signal(env
, info
.si_signo
, &info
);
1064 if (do_strex_a64(env
)) {
1065 addr
= env
->cp15
.c6_data
;
1070 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1072 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1075 process_pending_signals(env
);
1076 /* Exception return on AArch64 always clears the exclusive monitor,
1077 * so any return to running guest code implies this.
1078 * A strex (successful or otherwise) also clears the monitor, so
1079 * we don't need to specialcase EXCP_STREX.
1081 env
->exclusive_addr
= -1;
1084 #endif /* ndef TARGET_ABI32 */
1088 #ifdef TARGET_UNICORE32
1090 void cpu_loop(CPUUniCore32State
*env
)
1092 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1094 unsigned int n
, insn
;
1095 target_siginfo_t info
;
1099 trapnr
= uc32_cpu_exec(env
);
1102 case UC32_EXCP_PRIV
:
1105 get_user_u32(insn
, env
->regs
[31] - 4);
1106 n
= insn
& 0xffffff;
1108 if (n
>= UC32_SYSCALL_BASE
) {
1110 n
-= UC32_SYSCALL_BASE
;
1111 if (n
== UC32_SYSCALL_NR_set_tls
) {
1112 cpu_set_tls(env
, env
->regs
[0]);
1115 env
->regs
[0] = do_syscall(env
,
1130 case UC32_EXCP_DTRAP
:
1131 case UC32_EXCP_ITRAP
:
1132 info
.si_signo
= SIGSEGV
;
1134 /* XXX: check env->error_code */
1135 info
.si_code
= TARGET_SEGV_MAPERR
;
1136 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1137 queue_signal(env
, info
.si_signo
, &info
);
1139 case EXCP_INTERRUPT
:
1140 /* just indicate that signals should be handled asap */
1146 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1148 info
.si_signo
= sig
;
1150 info
.si_code
= TARGET_TRAP_BRKPT
;
1151 queue_signal(env
, info
.si_signo
, &info
);
1158 process_pending_signals(env
);
1162 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1163 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1169 #define SPARC64_STACK_BIAS 2047
1173 /* WARNING: dealing with register windows _is_ complicated. More info
1174 can be found at http://www.sics.se/~psm/sparcstack.html */
1175 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1177 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1178 /* wrap handling : if cwp is on the last window, then we use the
1179 registers 'after' the end */
1180 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1181 index
+= 16 * env
->nwindows
;
1185 /* save the register window 'cwp1' */
1186 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1191 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1192 #ifdef TARGET_SPARC64
1194 sp_ptr
+= SPARC64_STACK_BIAS
;
1196 #if defined(DEBUG_WIN)
1197 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1200 for(i
= 0; i
< 16; i
++) {
1201 /* FIXME - what to do if put_user() fails? */
1202 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1203 sp_ptr
+= sizeof(abi_ulong
);
1207 static void save_window(CPUSPARCState
*env
)
1209 #ifndef TARGET_SPARC64
1210 unsigned int new_wim
;
1211 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1212 ((1LL << env
->nwindows
) - 1);
1213 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1216 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1222 static void restore_window(CPUSPARCState
*env
)
1224 #ifndef TARGET_SPARC64
1225 unsigned int new_wim
;
1227 unsigned int i
, cwp1
;
1230 #ifndef TARGET_SPARC64
1231 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1232 ((1LL << env
->nwindows
) - 1);
1235 /* restore the invalid window */
1236 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1237 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1238 #ifdef TARGET_SPARC64
1240 sp_ptr
+= SPARC64_STACK_BIAS
;
1242 #if defined(DEBUG_WIN)
1243 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1246 for(i
= 0; i
< 16; i
++) {
1247 /* FIXME - what to do if get_user() fails? */
1248 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1249 sp_ptr
+= sizeof(abi_ulong
);
1251 #ifdef TARGET_SPARC64
1253 if (env
->cleanwin
< env
->nwindows
- 1)
1261 static void flush_windows(CPUSPARCState
*env
)
1267 /* if restore would invoke restore_window(), then we can stop */
1268 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1269 #ifndef TARGET_SPARC64
1270 if (env
->wim
& (1 << cwp1
))
1273 if (env
->canrestore
== 0)
1278 save_window_offset(env
, cwp1
);
1281 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1282 #ifndef TARGET_SPARC64
1283 /* set wim so that restore will reload the registers */
1284 env
->wim
= 1 << cwp1
;
1286 #if defined(DEBUG_WIN)
1287 printf("flush_windows: nb=%d\n", offset
- 1);
1291 void cpu_loop (CPUSPARCState
*env
)
1293 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1296 target_siginfo_t info
;
1299 trapnr
= cpu_sparc_exec (env
);
1301 /* Compute PSR before exposing state. */
1302 if (env
->cc_op
!= CC_OP_FLAGS
) {
1307 #ifndef TARGET_SPARC64
1314 ret
= do_syscall (env
, env
->gregs
[1],
1315 env
->regwptr
[0], env
->regwptr
[1],
1316 env
->regwptr
[2], env
->regwptr
[3],
1317 env
->regwptr
[4], env
->regwptr
[5],
1319 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1320 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1321 env
->xcc
|= PSR_CARRY
;
1323 env
->psr
|= PSR_CARRY
;
1327 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1328 env
->xcc
&= ~PSR_CARRY
;
1330 env
->psr
&= ~PSR_CARRY
;
1333 env
->regwptr
[0] = ret
;
1334 /* next instruction */
1336 env
->npc
= env
->npc
+ 4;
1338 case 0x83: /* flush windows */
1343 /* next instruction */
1345 env
->npc
= env
->npc
+ 4;
1347 #ifndef TARGET_SPARC64
1348 case TT_WIN_OVF
: /* window overflow */
1351 case TT_WIN_UNF
: /* window underflow */
1352 restore_window(env
);
1357 info
.si_signo
= TARGET_SIGSEGV
;
1359 /* XXX: check env->error_code */
1360 info
.si_code
= TARGET_SEGV_MAPERR
;
1361 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1362 queue_signal(env
, info
.si_signo
, &info
);
1366 case TT_SPILL
: /* window overflow */
1369 case TT_FILL
: /* window underflow */
1370 restore_window(env
);
1375 info
.si_signo
= TARGET_SIGSEGV
;
1377 /* XXX: check env->error_code */
1378 info
.si_code
= TARGET_SEGV_MAPERR
;
1379 if (trapnr
== TT_DFAULT
)
1380 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1382 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1383 queue_signal(env
, info
.si_signo
, &info
);
1386 #ifndef TARGET_ABI32
1389 sparc64_get_context(env
);
1393 sparc64_set_context(env
);
1397 case EXCP_INTERRUPT
:
1398 /* just indicate that signals should be handled asap */
1402 info
.si_signo
= TARGET_SIGILL
;
1404 info
.si_code
= TARGET_ILL_ILLOPC
;
1405 info
._sifields
._sigfault
._addr
= env
->pc
;
1406 queue_signal(env
, info
.si_signo
, &info
);
1413 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1416 info
.si_signo
= sig
;
1418 info
.si_code
= TARGET_TRAP_BRKPT
;
1419 queue_signal(env
, info
.si_signo
, &info
);
1424 printf ("Unhandled trap: 0x%x\n", trapnr
);
1425 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1428 process_pending_signals (env
);
1435 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1441 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1443 return cpu_ppc_get_tb(env
);
1446 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1448 return cpu_ppc_get_tb(env
) >> 32;
1451 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1453 return cpu_ppc_get_tb(env
);
1456 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1458 return cpu_ppc_get_tb(env
) >> 32;
1461 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1462 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1464 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1466 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1469 /* XXX: to be fixed */
1470 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1475 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
/* Dump CPU state plus a printf-style message to stderr and the QEMU log.
 * NOTE(review): the do { ... } while (0) wrapper lines appear to have
 * been lost from this chunk -- the macro body is incomplete as shown. */
1480 #define EXCP_DUMP(env, fmt, ...) \
1482 CPUState *cs = ENV_GET_CPU(env); \
1483 fprintf(stderr, fmt , ## __VA_ARGS__); \
1484 cpu_dump_state(cs, stderr, fprintf, 0); \
1485 qemu_log(fmt, ## __VA_ARGS__); \
1486 if (qemu_log_enabled()) { \
1487 log_cpu_state(cs, 0); \
1491 static int do_store_exclusive(CPUPPCState
*env
)
1494 target_ulong page_addr
;
1495 target_ulong val
, val2
__attribute__((unused
));
1499 addr
= env
->reserve_ea
;
1500 page_addr
= addr
& TARGET_PAGE_MASK
;
1503 flags
= page_get_flags(page_addr
);
1504 if ((flags
& PAGE_READ
) == 0) {
1507 int reg
= env
->reserve_info
& 0x1f;
1508 int size
= (env
->reserve_info
>> 5) & 0xf;
1511 if (addr
== env
->reserve_addr
) {
1513 case 1: segv
= get_user_u8(val
, addr
); break;
1514 case 2: segv
= get_user_u16(val
, addr
); break;
1515 case 4: segv
= get_user_u32(val
, addr
); break;
1516 #if defined(TARGET_PPC64)
1517 case 8: segv
= get_user_u64(val
, addr
); break;
1519 segv
= get_user_u64(val
, addr
);
1521 segv
= get_user_u64(val2
, addr
+ 8);
1528 if (!segv
&& val
== env
->reserve_val
) {
1529 val
= env
->gpr
[reg
];
1531 case 1: segv
= put_user_u8(val
, addr
); break;
1532 case 2: segv
= put_user_u16(val
, addr
); break;
1533 case 4: segv
= put_user_u32(val
, addr
); break;
1534 #if defined(TARGET_PPC64)
1535 case 8: segv
= put_user_u64(val
, addr
); break;
1537 if (val2
== env
->reserve_val2
) {
1538 segv
= put_user_u64(val
, addr
);
1540 segv
= put_user_u64(val2
, addr
+ 8);
1553 env
->crf
[0] = (stored
<< 1) | xer_so
;
1554 env
->reserve_addr
= (target_ulong
)-1;
1564 void cpu_loop(CPUPPCState
*env
)
1566 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1567 target_siginfo_t info
;
1573 trapnr
= cpu_ppc_exec(env
);
1576 case POWERPC_EXCP_NONE
:
1579 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1580 cpu_abort(env
, "Critical interrupt while in user mode. "
1583 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1584 cpu_abort(env
, "Machine check exception while in user mode. "
1587 case POWERPC_EXCP_DSI
: /* Data storage exception */
1588 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1590 /* XXX: check this. Seems bugged */
1591 switch (env
->error_code
& 0xFF000000) {
1593 info
.si_signo
= TARGET_SIGSEGV
;
1595 info
.si_code
= TARGET_SEGV_MAPERR
;
1598 info
.si_signo
= TARGET_SIGILL
;
1600 info
.si_code
= TARGET_ILL_ILLADR
;
1603 info
.si_signo
= TARGET_SIGSEGV
;
1605 info
.si_code
= TARGET_SEGV_ACCERR
;
1608 /* Let's send a regular segfault... */
1609 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1611 info
.si_signo
= TARGET_SIGSEGV
;
1613 info
.si_code
= TARGET_SEGV_MAPERR
;
1616 info
._sifields
._sigfault
._addr
= env
->nip
;
1617 queue_signal(env
, info
.si_signo
, &info
);
1619 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1620 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1621 "\n", env
->spr
[SPR_SRR0
]);
1622 /* XXX: check this */
1623 switch (env
->error_code
& 0xFF000000) {
1625 info
.si_signo
= TARGET_SIGSEGV
;
1627 info
.si_code
= TARGET_SEGV_MAPERR
;
1631 info
.si_signo
= TARGET_SIGSEGV
;
1633 info
.si_code
= TARGET_SEGV_ACCERR
;
1636 /* Let's send a regular segfault... */
1637 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1639 info
.si_signo
= TARGET_SIGSEGV
;
1641 info
.si_code
= TARGET_SEGV_MAPERR
;
1644 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1645 queue_signal(env
, info
.si_signo
, &info
);
1647 case POWERPC_EXCP_EXTERNAL
: /* External input */
1648 cpu_abort(env
, "External interrupt while in user mode. "
1651 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1652 EXCP_DUMP(env
, "Unaligned memory access\n");
1653 /* XXX: check this */
1654 info
.si_signo
= TARGET_SIGBUS
;
1656 info
.si_code
= TARGET_BUS_ADRALN
;
1657 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1658 queue_signal(env
, info
.si_signo
, &info
);
1660 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1661 /* XXX: check this */
1662 switch (env
->error_code
& ~0xF) {
1663 case POWERPC_EXCP_FP
:
1664 EXCP_DUMP(env
, "Floating point program exception\n");
1665 info
.si_signo
= TARGET_SIGFPE
;
1667 switch (env
->error_code
& 0xF) {
1668 case POWERPC_EXCP_FP_OX
:
1669 info
.si_code
= TARGET_FPE_FLTOVF
;
1671 case POWERPC_EXCP_FP_UX
:
1672 info
.si_code
= TARGET_FPE_FLTUND
;
1674 case POWERPC_EXCP_FP_ZX
:
1675 case POWERPC_EXCP_FP_VXZDZ
:
1676 info
.si_code
= TARGET_FPE_FLTDIV
;
1678 case POWERPC_EXCP_FP_XX
:
1679 info
.si_code
= TARGET_FPE_FLTRES
;
1681 case POWERPC_EXCP_FP_VXSOFT
:
1682 info
.si_code
= TARGET_FPE_FLTINV
;
1684 case POWERPC_EXCP_FP_VXSNAN
:
1685 case POWERPC_EXCP_FP_VXISI
:
1686 case POWERPC_EXCP_FP_VXIDI
:
1687 case POWERPC_EXCP_FP_VXIMZ
:
1688 case POWERPC_EXCP_FP_VXVC
:
1689 case POWERPC_EXCP_FP_VXSQRT
:
1690 case POWERPC_EXCP_FP_VXCVI
:
1691 info
.si_code
= TARGET_FPE_FLTSUB
;
1694 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1699 case POWERPC_EXCP_INVAL
:
1700 EXCP_DUMP(env
, "Invalid instruction\n");
1701 info
.si_signo
= TARGET_SIGILL
;
1703 switch (env
->error_code
& 0xF) {
1704 case POWERPC_EXCP_INVAL_INVAL
:
1705 info
.si_code
= TARGET_ILL_ILLOPC
;
1707 case POWERPC_EXCP_INVAL_LSWX
:
1708 info
.si_code
= TARGET_ILL_ILLOPN
;
1710 case POWERPC_EXCP_INVAL_SPR
:
1711 info
.si_code
= TARGET_ILL_PRVREG
;
1713 case POWERPC_EXCP_INVAL_FP
:
1714 info
.si_code
= TARGET_ILL_COPROC
;
1717 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1718 env
->error_code
& 0xF);
1719 info
.si_code
= TARGET_ILL_ILLADR
;
1723 case POWERPC_EXCP_PRIV
:
1724 EXCP_DUMP(env
, "Privilege violation\n");
1725 info
.si_signo
= TARGET_SIGILL
;
1727 switch (env
->error_code
& 0xF) {
1728 case POWERPC_EXCP_PRIV_OPC
:
1729 info
.si_code
= TARGET_ILL_PRVOPC
;
1731 case POWERPC_EXCP_PRIV_REG
:
1732 info
.si_code
= TARGET_ILL_PRVREG
;
1735 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1736 env
->error_code
& 0xF);
1737 info
.si_code
= TARGET_ILL_PRVOPC
;
1741 case POWERPC_EXCP_TRAP
:
1742 cpu_abort(env
, "Tried to call a TRAP\n");
1745 /* Should not happen ! */
1746 cpu_abort(env
, "Unknown program exception (%02x)\n",
1750 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1751 queue_signal(env
, info
.si_signo
, &info
);
1753 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1754 EXCP_DUMP(env
, "No floating point allowed\n");
1755 info
.si_signo
= TARGET_SIGILL
;
1757 info
.si_code
= TARGET_ILL_COPROC
;
1758 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1759 queue_signal(env
, info
.si_signo
, &info
);
1761 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1762 cpu_abort(env
, "Syscall exception while in user mode. "
1765 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1766 EXCP_DUMP(env
, "No APU instruction allowed\n");
1767 info
.si_signo
= TARGET_SIGILL
;
1769 info
.si_code
= TARGET_ILL_COPROC
;
1770 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1771 queue_signal(env
, info
.si_signo
, &info
);
1773 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1774 cpu_abort(env
, "Decrementer interrupt while in user mode. "
1777 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1778 cpu_abort(env
, "Fix interval timer interrupt while in user mode. "
1781 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1782 cpu_abort(env
, "Watchdog timer interrupt while in user mode. "
1785 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1786 cpu_abort(env
, "Data TLB exception while in user mode. "
1789 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1790 cpu_abort(env
, "Instruction TLB exception while in user mode. "
1793 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1794 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1795 info
.si_signo
= TARGET_SIGILL
;
1797 info
.si_code
= TARGET_ILL_COPROC
;
1798 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1799 queue_signal(env
, info
.si_signo
, &info
);
1801 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1802 cpu_abort(env
, "Embedded floating-point data IRQ not handled\n");
1804 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1805 cpu_abort(env
, "Embedded floating-point round IRQ not handled\n");
1807 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1808 cpu_abort(env
, "Performance monitor exception not handled\n");
1810 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1811 cpu_abort(env
, "Doorbell interrupt while in user mode. "
1814 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1815 cpu_abort(env
, "Doorbell critical interrupt while in user mode. "
1818 case POWERPC_EXCP_RESET
: /* System reset exception */
1819 cpu_abort(env
, "Reset interrupt while in user mode. "
1822 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1823 cpu_abort(env
, "Data segment exception while in user mode. "
1826 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1827 cpu_abort(env
, "Instruction segment exception "
1828 "while in user mode. Aborting\n");
1830 /* PowerPC 64 with hypervisor mode support */
1831 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1832 cpu_abort(env
, "Hypervisor decrementer interrupt "
1833 "while in user mode. Aborting\n");
1835 case POWERPC_EXCP_TRACE
: /* Trace exception */
1837 * we use this exception to emulate step-by-step execution mode.
1840 /* PowerPC 64 with hypervisor mode support */
1841 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1842 cpu_abort(env
, "Hypervisor data storage exception "
1843 "while in user mode. Aborting\n");
1845 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1846 cpu_abort(env
, "Hypervisor instruction storage exception "
1847 "while in user mode. Aborting\n");
1849 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1850 cpu_abort(env
, "Hypervisor data segment exception "
1851 "while in user mode. Aborting\n");
1853 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1854 cpu_abort(env
, "Hypervisor instruction segment exception "
1855 "while in user mode. Aborting\n");
1857 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1858 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1859 info
.si_signo
= TARGET_SIGILL
;
1861 info
.si_code
= TARGET_ILL_COPROC
;
1862 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1863 queue_signal(env
, info
.si_signo
, &info
);
1865 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1866 cpu_abort(env
, "Programmable interval timer interrupt "
1867 "while in user mode. Aborting\n");
1869 case POWERPC_EXCP_IO
: /* IO error exception */
1870 cpu_abort(env
, "IO error exception while in user mode. "
1873 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1874 cpu_abort(env
, "Run mode exception while in user mode. "
1877 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1878 cpu_abort(env
, "Emulation trap exception not handled\n");
1880 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1881 cpu_abort(env
, "Instruction fetch TLB exception "
1882 "while in user-mode. Aborting");
1884 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1885 cpu_abort(env
, "Data load TLB exception while in user-mode. "
1888 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1889 cpu_abort(env
, "Data store TLB exception while in user-mode. "
1892 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1893 cpu_abort(env
, "Floating-point assist exception not handled\n");
1895 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1896 cpu_abort(env
, "Instruction address breakpoint exception "
1899 case POWERPC_EXCP_SMI
: /* System management interrupt */
1900 cpu_abort(env
, "System management interrupt while in user mode. "
1903 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1904 cpu_abort(env
, "Thermal interrupt interrupt while in user mode. "
1907 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1908 cpu_abort(env
, "Performance monitor exception not handled\n");
1910 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1911 cpu_abort(env
, "Vector assist exception not handled\n");
1913 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1914 cpu_abort(env
, "Soft patch exception not handled\n");
1916 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1917 cpu_abort(env
, "Maintenance exception while in user mode. "
1920 case POWERPC_EXCP_STOP
: /* stop translation */
1921 /* We did invalidate the instruction cache. Go on */
1923 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1924 /* We just stopped because of a branch. Go on */
1926 case POWERPC_EXCP_SYSCALL_USER
:
1927 /* system call in user-mode emulation */
1929 * PPC ABI uses overflow flag in cr0 to signal an error
1932 env
->crf
[0] &= ~0x1;
1933 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1934 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1936 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1937 /* Returning from a successful sigreturn syscall.
1938 Avoid corrupting register state. */
1941 if (ret
> (target_ulong
)(-515)) {
1947 case POWERPC_EXCP_STCX
:
1948 if (do_store_exclusive(env
)) {
1949 info
.si_signo
= TARGET_SIGSEGV
;
1951 info
.si_code
= TARGET_SEGV_MAPERR
;
1952 info
._sifields
._sigfault
._addr
= env
->nip
;
1953 queue_signal(env
, info
.si_signo
, &info
);
1960 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1962 info
.si_signo
= sig
;
1964 info
.si_code
= TARGET_TRAP_BRKPT
;
1965 queue_signal(env
, info
.si_signo
, &info
);
1969 case EXCP_INTERRUPT
:
1970 /* just indicate that signals should be handled asap */
1973 cpu_abort(env
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1976 process_pending_signals(env
);
1983 # ifdef TARGET_ABI_MIPSO32
1984 # define MIPS_SYS(name, args) args,
1985 static const uint8_t mips_syscall_args
[] = {
1986 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1987 MIPS_SYS(sys_exit
, 1)
1988 MIPS_SYS(sys_fork
, 0)
1989 MIPS_SYS(sys_read
, 3)
1990 MIPS_SYS(sys_write
, 3)
1991 MIPS_SYS(sys_open
, 3) /* 4005 */
1992 MIPS_SYS(sys_close
, 1)
1993 MIPS_SYS(sys_waitpid
, 3)
1994 MIPS_SYS(sys_creat
, 2)
1995 MIPS_SYS(sys_link
, 2)
1996 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1997 MIPS_SYS(sys_execve
, 0)
1998 MIPS_SYS(sys_chdir
, 1)
1999 MIPS_SYS(sys_time
, 1)
2000 MIPS_SYS(sys_mknod
, 3)
2001 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2002 MIPS_SYS(sys_lchown
, 3)
2003 MIPS_SYS(sys_ni_syscall
, 0)
2004 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2005 MIPS_SYS(sys_lseek
, 3)
2006 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2007 MIPS_SYS(sys_mount
, 5)
2008 MIPS_SYS(sys_umount
, 1)
2009 MIPS_SYS(sys_setuid
, 1)
2010 MIPS_SYS(sys_getuid
, 0)
2011 MIPS_SYS(sys_stime
, 1) /* 4025 */
2012 MIPS_SYS(sys_ptrace
, 4)
2013 MIPS_SYS(sys_alarm
, 1)
2014 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2015 MIPS_SYS(sys_pause
, 0)
2016 MIPS_SYS(sys_utime
, 2) /* 4030 */
2017 MIPS_SYS(sys_ni_syscall
, 0)
2018 MIPS_SYS(sys_ni_syscall
, 0)
2019 MIPS_SYS(sys_access
, 2)
2020 MIPS_SYS(sys_nice
, 1)
2021 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2022 MIPS_SYS(sys_sync
, 0)
2023 MIPS_SYS(sys_kill
, 2)
2024 MIPS_SYS(sys_rename
, 2)
2025 MIPS_SYS(sys_mkdir
, 2)
2026 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2027 MIPS_SYS(sys_dup
, 1)
2028 MIPS_SYS(sys_pipe
, 0)
2029 MIPS_SYS(sys_times
, 1)
2030 MIPS_SYS(sys_ni_syscall
, 0)
2031 MIPS_SYS(sys_brk
, 1) /* 4045 */
2032 MIPS_SYS(sys_setgid
, 1)
2033 MIPS_SYS(sys_getgid
, 0)
2034 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2035 MIPS_SYS(sys_geteuid
, 0)
2036 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2037 MIPS_SYS(sys_acct
, 0)
2038 MIPS_SYS(sys_umount2
, 2)
2039 MIPS_SYS(sys_ni_syscall
, 0)
2040 MIPS_SYS(sys_ioctl
, 3)
2041 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2042 MIPS_SYS(sys_ni_syscall
, 2)
2043 MIPS_SYS(sys_setpgid
, 2)
2044 MIPS_SYS(sys_ni_syscall
, 0)
2045 MIPS_SYS(sys_olduname
, 1)
2046 MIPS_SYS(sys_umask
, 1) /* 4060 */
2047 MIPS_SYS(sys_chroot
, 1)
2048 MIPS_SYS(sys_ustat
, 2)
2049 MIPS_SYS(sys_dup2
, 2)
2050 MIPS_SYS(sys_getppid
, 0)
2051 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2052 MIPS_SYS(sys_setsid
, 0)
2053 MIPS_SYS(sys_sigaction
, 3)
2054 MIPS_SYS(sys_sgetmask
, 0)
2055 MIPS_SYS(sys_ssetmask
, 1)
2056 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2057 MIPS_SYS(sys_setregid
, 2)
2058 MIPS_SYS(sys_sigsuspend
, 0)
2059 MIPS_SYS(sys_sigpending
, 1)
2060 MIPS_SYS(sys_sethostname
, 2)
2061 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2062 MIPS_SYS(sys_getrlimit
, 2)
2063 MIPS_SYS(sys_getrusage
, 2)
2064 MIPS_SYS(sys_gettimeofday
, 2)
2065 MIPS_SYS(sys_settimeofday
, 2)
2066 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2067 MIPS_SYS(sys_setgroups
, 2)
2068 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2069 MIPS_SYS(sys_symlink
, 2)
2070 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2071 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2072 MIPS_SYS(sys_uselib
, 1)
2073 MIPS_SYS(sys_swapon
, 2)
2074 MIPS_SYS(sys_reboot
, 3)
2075 MIPS_SYS(old_readdir
, 3)
2076 MIPS_SYS(old_mmap
, 6) /* 4090 */
2077 MIPS_SYS(sys_munmap
, 2)
2078 MIPS_SYS(sys_truncate
, 2)
2079 MIPS_SYS(sys_ftruncate
, 2)
2080 MIPS_SYS(sys_fchmod
, 2)
2081 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2082 MIPS_SYS(sys_getpriority
, 2)
2083 MIPS_SYS(sys_setpriority
, 3)
2084 MIPS_SYS(sys_ni_syscall
, 0)
2085 MIPS_SYS(sys_statfs
, 2)
2086 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2087 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2088 MIPS_SYS(sys_socketcall
, 2)
2089 MIPS_SYS(sys_syslog
, 3)
2090 MIPS_SYS(sys_setitimer
, 3)
2091 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2092 MIPS_SYS(sys_newstat
, 2)
2093 MIPS_SYS(sys_newlstat
, 2)
2094 MIPS_SYS(sys_newfstat
, 2)
2095 MIPS_SYS(sys_uname
, 1)
2096 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2097 MIPS_SYS(sys_vhangup
, 0)
2098 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2099 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2100 MIPS_SYS(sys_wait4
, 4)
2101 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2102 MIPS_SYS(sys_sysinfo
, 1)
2103 MIPS_SYS(sys_ipc
, 6)
2104 MIPS_SYS(sys_fsync
, 1)
2105 MIPS_SYS(sys_sigreturn
, 0)
2106 MIPS_SYS(sys_clone
, 6) /* 4120 */
2107 MIPS_SYS(sys_setdomainname
, 2)
2108 MIPS_SYS(sys_newuname
, 1)
2109 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2110 MIPS_SYS(sys_adjtimex
, 1)
2111 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2112 MIPS_SYS(sys_sigprocmask
, 3)
2113 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2114 MIPS_SYS(sys_init_module
, 5)
2115 MIPS_SYS(sys_delete_module
, 1)
2116 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2117 MIPS_SYS(sys_quotactl
, 0)
2118 MIPS_SYS(sys_getpgid
, 1)
2119 MIPS_SYS(sys_fchdir
, 1)
2120 MIPS_SYS(sys_bdflush
, 2)
2121 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2122 MIPS_SYS(sys_personality
, 1)
2123 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2124 MIPS_SYS(sys_setfsuid
, 1)
2125 MIPS_SYS(sys_setfsgid
, 1)
2126 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2127 MIPS_SYS(sys_getdents
, 3)
2128 MIPS_SYS(sys_select
, 5)
2129 MIPS_SYS(sys_flock
, 2)
2130 MIPS_SYS(sys_msync
, 3)
2131 MIPS_SYS(sys_readv
, 3) /* 4145 */
2132 MIPS_SYS(sys_writev
, 3)
2133 MIPS_SYS(sys_cacheflush
, 3)
2134 MIPS_SYS(sys_cachectl
, 3)
2135 MIPS_SYS(sys_sysmips
, 4)
2136 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2137 MIPS_SYS(sys_getsid
, 1)
2138 MIPS_SYS(sys_fdatasync
, 0)
2139 MIPS_SYS(sys_sysctl
, 1)
2140 MIPS_SYS(sys_mlock
, 2)
2141 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2142 MIPS_SYS(sys_mlockall
, 1)
2143 MIPS_SYS(sys_munlockall
, 0)
2144 MIPS_SYS(sys_sched_setparam
, 2)
2145 MIPS_SYS(sys_sched_getparam
, 2)
2146 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2147 MIPS_SYS(sys_sched_getscheduler
, 1)
2148 MIPS_SYS(sys_sched_yield
, 0)
2149 MIPS_SYS(sys_sched_get_priority_max
, 1)
2150 MIPS_SYS(sys_sched_get_priority_min
, 1)
2151 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2152 MIPS_SYS(sys_nanosleep
, 2)
2153 MIPS_SYS(sys_mremap
, 5)
2154 MIPS_SYS(sys_accept
, 3)
2155 MIPS_SYS(sys_bind
, 3)
2156 MIPS_SYS(sys_connect
, 3) /* 4170 */
2157 MIPS_SYS(sys_getpeername
, 3)
2158 MIPS_SYS(sys_getsockname
, 3)
2159 MIPS_SYS(sys_getsockopt
, 5)
2160 MIPS_SYS(sys_listen
, 2)
2161 MIPS_SYS(sys_recv
, 4) /* 4175 */
2162 MIPS_SYS(sys_recvfrom
, 6)
2163 MIPS_SYS(sys_recvmsg
, 3)
2164 MIPS_SYS(sys_send
, 4)
2165 MIPS_SYS(sys_sendmsg
, 3)
2166 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2167 MIPS_SYS(sys_setsockopt
, 5)
2168 MIPS_SYS(sys_shutdown
, 2)
2169 MIPS_SYS(sys_socket
, 3)
2170 MIPS_SYS(sys_socketpair
, 4)
2171 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2172 MIPS_SYS(sys_getresuid
, 3)
2173 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2174 MIPS_SYS(sys_poll
, 3)
2175 MIPS_SYS(sys_nfsservctl
, 3)
2176 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2177 MIPS_SYS(sys_getresgid
, 3)
2178 MIPS_SYS(sys_prctl
, 5)
2179 MIPS_SYS(sys_rt_sigreturn
, 0)
2180 MIPS_SYS(sys_rt_sigaction
, 4)
2181 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2182 MIPS_SYS(sys_rt_sigpending
, 2)
2183 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2184 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2185 MIPS_SYS(sys_rt_sigsuspend
, 0)
2186 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2187 MIPS_SYS(sys_pwrite64
, 6)
2188 MIPS_SYS(sys_chown
, 3)
2189 MIPS_SYS(sys_getcwd
, 2)
2190 MIPS_SYS(sys_capget
, 2)
2191 MIPS_SYS(sys_capset
, 2) /* 4205 */
2192 MIPS_SYS(sys_sigaltstack
, 2)
2193 MIPS_SYS(sys_sendfile
, 4)
2194 MIPS_SYS(sys_ni_syscall
, 0)
2195 MIPS_SYS(sys_ni_syscall
, 0)
2196 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2197 MIPS_SYS(sys_truncate64
, 4)
2198 MIPS_SYS(sys_ftruncate64
, 4)
2199 MIPS_SYS(sys_stat64
, 2)
2200 MIPS_SYS(sys_lstat64
, 2)
2201 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2202 MIPS_SYS(sys_pivot_root
, 2)
2203 MIPS_SYS(sys_mincore
, 3)
2204 MIPS_SYS(sys_madvise
, 3)
2205 MIPS_SYS(sys_getdents64
, 3)
2206 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2207 MIPS_SYS(sys_ni_syscall
, 0)
2208 MIPS_SYS(sys_gettid
, 0)
2209 MIPS_SYS(sys_readahead
, 5)
2210 MIPS_SYS(sys_setxattr
, 5)
2211 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2212 MIPS_SYS(sys_fsetxattr
, 5)
2213 MIPS_SYS(sys_getxattr
, 4)
2214 MIPS_SYS(sys_lgetxattr
, 4)
2215 MIPS_SYS(sys_fgetxattr
, 4)
2216 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2217 MIPS_SYS(sys_llistxattr
, 3)
2218 MIPS_SYS(sys_flistxattr
, 3)
2219 MIPS_SYS(sys_removexattr
, 2)
2220 MIPS_SYS(sys_lremovexattr
, 2)
2221 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2222 MIPS_SYS(sys_tkill
, 2)
2223 MIPS_SYS(sys_sendfile64
, 5)
2224 MIPS_SYS(sys_futex
, 6)
2225 MIPS_SYS(sys_sched_setaffinity
, 3)
2226 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2227 MIPS_SYS(sys_io_setup
, 2)
2228 MIPS_SYS(sys_io_destroy
, 1)
2229 MIPS_SYS(sys_io_getevents
, 5)
2230 MIPS_SYS(sys_io_submit
, 3)
2231 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2232 MIPS_SYS(sys_exit_group
, 1)
2233 MIPS_SYS(sys_lookup_dcookie
, 3)
2234 MIPS_SYS(sys_epoll_create
, 1)
2235 MIPS_SYS(sys_epoll_ctl
, 4)
2236 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2237 MIPS_SYS(sys_remap_file_pages
, 5)
2238 MIPS_SYS(sys_set_tid_address
, 1)
2239 MIPS_SYS(sys_restart_syscall
, 0)
2240 MIPS_SYS(sys_fadvise64_64
, 7)
2241 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2242 MIPS_SYS(sys_fstatfs64
, 2)
2243 MIPS_SYS(sys_timer_create
, 3)
2244 MIPS_SYS(sys_timer_settime
, 4)
2245 MIPS_SYS(sys_timer_gettime
, 2)
2246 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2247 MIPS_SYS(sys_timer_delete
, 1)
2248 MIPS_SYS(sys_clock_settime
, 2)
2249 MIPS_SYS(sys_clock_gettime
, 2)
2250 MIPS_SYS(sys_clock_getres
, 2)
2251 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2252 MIPS_SYS(sys_tgkill
, 3)
2253 MIPS_SYS(sys_utimes
, 2)
2254 MIPS_SYS(sys_mbind
, 4)
2255 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2256 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2257 MIPS_SYS(sys_mq_open
, 4)
2258 MIPS_SYS(sys_mq_unlink
, 1)
2259 MIPS_SYS(sys_mq_timedsend
, 5)
2260 MIPS_SYS(sys_mq_timedreceive
, 5)
2261 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2262 MIPS_SYS(sys_mq_getsetattr
, 3)
2263 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2264 MIPS_SYS(sys_waitid
, 4)
2265 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2266 MIPS_SYS(sys_add_key
, 5)
2267 MIPS_SYS(sys_request_key
, 4)
2268 MIPS_SYS(sys_keyctl
, 5)
2269 MIPS_SYS(sys_set_thread_area
, 1)
2270 MIPS_SYS(sys_inotify_init
, 0)
2271 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2272 MIPS_SYS(sys_inotify_rm_watch
, 2)
2273 MIPS_SYS(sys_migrate_pages
, 4)
2274 MIPS_SYS(sys_openat
, 4)
2275 MIPS_SYS(sys_mkdirat
, 3)
2276 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2277 MIPS_SYS(sys_fchownat
, 5)
2278 MIPS_SYS(sys_futimesat
, 3)
2279 MIPS_SYS(sys_fstatat64
, 4)
2280 MIPS_SYS(sys_unlinkat
, 3)
2281 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2282 MIPS_SYS(sys_linkat
, 5)
2283 MIPS_SYS(sys_symlinkat
, 3)
2284 MIPS_SYS(sys_readlinkat
, 4)
2285 MIPS_SYS(sys_fchmodat
, 3)
2286 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2287 MIPS_SYS(sys_pselect6
, 6)
2288 MIPS_SYS(sys_ppoll
, 5)
2289 MIPS_SYS(sys_unshare
, 1)
2290 MIPS_SYS(sys_splice
, 6)
2291 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2292 MIPS_SYS(sys_tee
, 4)
2293 MIPS_SYS(sys_vmsplice
, 4)
2294 MIPS_SYS(sys_move_pages
, 6)
2295 MIPS_SYS(sys_set_robust_list
, 2)
2296 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2297 MIPS_SYS(sys_kexec_load
, 4)
2298 MIPS_SYS(sys_getcpu
, 3)
2299 MIPS_SYS(sys_epoll_pwait
, 6)
2300 MIPS_SYS(sys_ioprio_set
, 3)
2301 MIPS_SYS(sys_ioprio_get
, 2)
2302 MIPS_SYS(sys_utimensat
, 4)
2303 MIPS_SYS(sys_signalfd
, 3)
2304 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2305 MIPS_SYS(sys_eventfd
, 1)
2306 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2307 MIPS_SYS(sys_timerfd_create
, 2)
2308 MIPS_SYS(sys_timerfd_gettime
, 2)
2309 MIPS_SYS(sys_timerfd_settime
, 4)
2310 MIPS_SYS(sys_signalfd4
, 4)
2311 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2312 MIPS_SYS(sys_epoll_create1
, 1)
2313 MIPS_SYS(sys_dup3
, 3)
2314 MIPS_SYS(sys_pipe2
, 2)
2315 MIPS_SYS(sys_inotify_init1
, 1)
2316 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2317 MIPS_SYS(sys_pwritev
, 6)
2318 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2319 MIPS_SYS(sys_perf_event_open
, 5)
2320 MIPS_SYS(sys_accept4
, 4)
2321 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2322 MIPS_SYS(sys_fanotify_init
, 2)
2323 MIPS_SYS(sys_fanotify_mark
, 6)
2324 MIPS_SYS(sys_prlimit64
, 4)
2325 MIPS_SYS(sys_name_to_handle_at
, 5)
2326 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2327 MIPS_SYS(sys_clock_adjtime
, 2)
2328 MIPS_SYS(sys_syncfs
, 1)
2333 static int do_store_exclusive(CPUMIPSState
*env
)
2336 target_ulong page_addr
;
2344 page_addr
= addr
& TARGET_PAGE_MASK
;
2347 flags
= page_get_flags(page_addr
);
2348 if ((flags
& PAGE_READ
) == 0) {
2351 reg
= env
->llreg
& 0x1f;
2352 d
= (env
->llreg
& 0x20) != 0;
2354 segv
= get_user_s64(val
, addr
);
2356 segv
= get_user_s32(val
, addr
);
2359 if (val
!= env
->llval
) {
2360 env
->active_tc
.gpr
[reg
] = 0;
2363 segv
= put_user_u64(env
->llnewval
, addr
);
2365 segv
= put_user_u32(env
->llnewval
, addr
);
2368 env
->active_tc
.gpr
[reg
] = 1;
2375 env
->active_tc
.PC
+= 4;
2388 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2396 info
->si_signo
= TARGET_SIGFPE
;
2398 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2399 queue_signal(env
, info
->si_signo
, &*info
);
2403 info
->si_signo
= TARGET_SIGTRAP
;
2405 queue_signal(env
, info
->si_signo
, &*info
);
2413 void cpu_loop(CPUMIPSState
*env
)
2415 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2416 target_siginfo_t info
;
2419 # ifdef TARGET_ABI_MIPSO32
2420 unsigned int syscall_num
;
2425 trapnr
= cpu_mips_exec(env
);
2429 env
->active_tc
.PC
+= 4;
2430 # ifdef TARGET_ABI_MIPSO32
2431 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2432 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2433 ret
= -TARGET_ENOSYS
;
2437 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2439 nb_args
= mips_syscall_args
[syscall_num
];
2440 sp_reg
= env
->active_tc
.gpr
[29];
2442 /* these arguments are taken from the stack */
2444 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2448 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2452 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2456 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2462 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2463 env
->active_tc
.gpr
[4],
2464 env
->active_tc
.gpr
[5],
2465 env
->active_tc
.gpr
[6],
2466 env
->active_tc
.gpr
[7],
2467 arg5
, arg6
, arg7
, arg8
);
2471 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2472 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2473 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2474 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2475 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2477 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2478 /* Returning from a successful sigreturn syscall.
2479 Avoid clobbering register state. */
2482 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2483 env
->active_tc
.gpr
[7] = 1; /* error flag */
2486 env
->active_tc
.gpr
[7] = 0; /* error flag */
2488 env
->active_tc
.gpr
[2] = ret
;
2494 info
.si_signo
= TARGET_SIGSEGV
;
2496 /* XXX: check env->error_code */
2497 info
.si_code
= TARGET_SEGV_MAPERR
;
2498 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2499 queue_signal(env
, info
.si_signo
, &info
);
2503 info
.si_signo
= TARGET_SIGILL
;
2506 queue_signal(env
, info
.si_signo
, &info
);
2508 case EXCP_INTERRUPT
:
2509 /* just indicate that signals should be handled asap */
2515 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2518 info
.si_signo
= sig
;
2520 info
.si_code
= TARGET_TRAP_BRKPT
;
2521 queue_signal(env
, info
.si_signo
, &info
);
2526 if (do_store_exclusive(env
)) {
2527 info
.si_signo
= TARGET_SIGSEGV
;
2529 info
.si_code
= TARGET_SEGV_MAPERR
;
2530 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2531 queue_signal(env
, info
.si_signo
, &info
);
2535 info
.si_signo
= TARGET_SIGILL
;
2537 info
.si_code
= TARGET_ILL_ILLOPC
;
2538 queue_signal(env
, info
.si_signo
, &info
);
2540 /* The code below was inspired by the MIPS Linux kernel trap
2541 * handling code in arch/mips/kernel/traps.c.
2545 abi_ulong trap_instr
;
2548 if (env
->hflags
& MIPS_HFLAG_M16
) {
2549 if (env
->insn_flags
& ASE_MICROMIPS
) {
2550 /* microMIPS mode */
2551 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2556 if ((trap_instr
>> 10) == 0x11) {
2557 /* 16-bit instruction */
2558 code
= trap_instr
& 0xf;
2560 /* 32-bit instruction */
2563 ret
= get_user_u16(instr_lo
,
2564 env
->active_tc
.PC
+ 2);
2568 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2569 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2570 /* Unfortunately, microMIPS also suffers from
2571 the old assembler bug... */
2572 if (code
>= (1 << 10)) {
2578 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2582 code
= (trap_instr
>> 6) & 0x3f;
2585 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2590 /* As described in the original Linux kernel code, the
2591 * below checks on 'code' are to work around an old
2594 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2595 if (code
>= (1 << 10)) {
2600 if (do_break(env
, &info
, code
) != 0) {
2607 abi_ulong trap_instr
;
2608 unsigned int code
= 0;
2610 if (env
->hflags
& MIPS_HFLAG_M16
) {
2611 /* microMIPS mode */
2614 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2615 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2617 trap_instr
= (instr
[0] << 16) | instr
[1];
2619 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2626 /* The immediate versions don't provide a code. */
2627 if (!(trap_instr
& 0xFC000000)) {
2628 if (env
->hflags
& MIPS_HFLAG_M16
) {
2629 /* microMIPS mode */
2630 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2632 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2636 if (do_break(env
, &info
, code
) != 0) {
2643 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2645 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2648 process_pending_signals(env
);
2653 #ifdef TARGET_OPENRISC
2655 void cpu_loop(CPUOpenRISCState
*env
)
2657 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2661 trapnr
= cpu_exec(env
);
2666 qemu_log("\nReset request, exit, pc is %#x\n", env
->pc
);
2670 qemu_log("\nBus error, exit, pc is %#x\n", env
->pc
);
2675 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2676 gdbsig
= TARGET_SIGSEGV
;
2679 qemu_log("\nTick time interrupt pc is %#x\n", env
->pc
);
2682 qemu_log("\nAlignment pc is %#x\n", env
->pc
);
2686 qemu_log("\nIllegal instructionpc is %#x\n", env
->pc
);
2690 qemu_log("\nExternal interruptpc is %#x\n", env
->pc
);
2694 qemu_log("\nTLB miss\n");
2697 qemu_log("\nRange\n");
2701 env
->pc
+= 4; /* 0xc00; */
2702 env
->gpr
[11] = do_syscall(env
,
2703 env
->gpr
[11], /* return value */
2704 env
->gpr
[3], /* r3 - r7 are params */
2712 qemu_log("\nFloating point error\n");
2715 qemu_log("\nTrap\n");
2722 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2724 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2725 gdbsig
= TARGET_SIGILL
;
2729 gdb_handlesig(cs
, gdbsig
);
2730 if (gdbsig
!= TARGET_SIGTRAP
) {
2735 process_pending_signals(env
);
2739 #endif /* TARGET_OPENRISC */
2742 void cpu_loop(CPUSH4State
*env
)
2744 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2746 target_siginfo_t info
;
2749 trapnr
= cpu_sh4_exec (env
);
2754 ret
= do_syscall(env
,
2763 env
->gregs
[0] = ret
;
2765 case EXCP_INTERRUPT
:
2766 /* just indicate that signals should be handled asap */
2772 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2775 info
.si_signo
= sig
;
2777 info
.si_code
= TARGET_TRAP_BRKPT
;
2778 queue_signal(env
, info
.si_signo
, &info
);
2784 info
.si_signo
= SIGSEGV
;
2786 info
.si_code
= TARGET_SEGV_MAPERR
;
2787 info
._sifields
._sigfault
._addr
= env
->tea
;
2788 queue_signal(env
, info
.si_signo
, &info
);
2792 printf ("Unhandled trap: 0x%x\n", trapnr
);
2793 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2796 process_pending_signals (env
);
2802 void cpu_loop(CPUCRISState
*env
)
2804 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2806 target_siginfo_t info
;
2809 trapnr
= cpu_cris_exec (env
);
2813 info
.si_signo
= SIGSEGV
;
2815 /* XXX: check env->error_code */
2816 info
.si_code
= TARGET_SEGV_MAPERR
;
2817 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2818 queue_signal(env
, info
.si_signo
, &info
);
2821 case EXCP_INTERRUPT
:
2822 /* just indicate that signals should be handled asap */
2825 ret
= do_syscall(env
,
2834 env
->regs
[10] = ret
;
2840 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2843 info
.si_signo
= sig
;
2845 info
.si_code
= TARGET_TRAP_BRKPT
;
2846 queue_signal(env
, info
.si_signo
, &info
);
2851 printf ("Unhandled trap: 0x%x\n", trapnr
);
2852 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2855 process_pending_signals (env
);
2860 #ifdef TARGET_MICROBLAZE
2861 void cpu_loop(CPUMBState
*env
)
2863 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2865 target_siginfo_t info
;
2868 trapnr
= cpu_mb_exec (env
);
2872 info
.si_signo
= SIGSEGV
;
2874 /* XXX: check env->error_code */
2875 info
.si_code
= TARGET_SEGV_MAPERR
;
2876 info
._sifields
._sigfault
._addr
= 0;
2877 queue_signal(env
, info
.si_signo
, &info
);
2880 case EXCP_INTERRUPT
:
2881 /* just indicate that signals should be handled asap */
2884 /* Return address is 4 bytes after the call. */
2886 env
->sregs
[SR_PC
] = env
->regs
[14];
2887 ret
= do_syscall(env
,
2899 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2900 if (env
->iflags
& D_FLAG
) {
2901 env
->sregs
[SR_ESR
] |= 1 << 12;
2902 env
->sregs
[SR_PC
] -= 4;
2903 /* FIXME: if branch was immed, replay the imm as well. */
2906 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2908 switch (env
->sregs
[SR_ESR
] & 31) {
2909 case ESR_EC_DIVZERO
:
2910 info
.si_signo
= SIGFPE
;
2912 info
.si_code
= TARGET_FPE_FLTDIV
;
2913 info
._sifields
._sigfault
._addr
= 0;
2914 queue_signal(env
, info
.si_signo
, &info
);
2917 info
.si_signo
= SIGFPE
;
2919 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2920 info
.si_code
= TARGET_FPE_FLTINV
;
2922 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2923 info
.si_code
= TARGET_FPE_FLTDIV
;
2925 info
._sifields
._sigfault
._addr
= 0;
2926 queue_signal(env
, info
.si_signo
, &info
);
2929 printf ("Unhandled hw-exception: 0x%x\n",
2930 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2931 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2940 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2943 info
.si_signo
= sig
;
2945 info
.si_code
= TARGET_TRAP_BRKPT
;
2946 queue_signal(env
, info
.si_signo
, &info
);
2951 printf ("Unhandled trap: 0x%x\n", trapnr
);
2952 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2955 process_pending_signals (env
);
/*
 * NOTE(review): this region is a garbled extraction of the M68K cpu_loop
 * from QEMU's linux-user main loop.  Each original source line is split
 * across several lines and prefixed with its original line number
 * (2962, 2964, ...).  Gaps in the numbering mean lines (braces, `switch`,
 * `case` labels, `break`s, declarations) were dropped by the extraction,
 * so the code below is NOT valid C as it stands.  Comments added only;
 * every original token is untouched.  Recover the full body from the
 * upstream file before building.
 */
/* Entry: emulation main loop for an M68K guest CPU. */
2962 void cpu_loop(CPUM68KState
*env
)
/* Obtain the generic CPUState for gdb/dump helpers below. */
2964 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
/* Signal info scratch buffer, filled before each queue_signal() call. */
2967 target_siginfo_t info
;
/* Per-task user-mode state hangs off cs->opaque. */
2968 TaskState
*ts
= cs
->opaque
;
/* Run translated code until an exception/trap is raised.
 * NOTE(review): the for(;;) loop header and `trapnr` declaration were
 * dropped by the extraction (orig. lines 2969-2970 missing). */
2971 trapnr
= cpu_m68k_exec(env
);
/* Simulator-syscall mode: syscall number is a uint16 at pc+2. */
2975 if (ts
->sim_syscalls
) {
2977 nr
= lduw(env
->pc
+ 2);
2979 do_m68k_simcall(env
, nr
);
/* Semihosting HALT instruction: service the request via dregs[0]. */
2985 case EXCP_HALT_INSN
:
2986 /* Semihosing syscall. */
2988 do_m68k_semihosting(env
, env
->dregs
[0]);
/* Unsupported opcode: deliver SIGILL with the faulting pc. */
2992 case EXCP_UNSUPPORTED
:
2994 info
.si_signo
= SIGILL
;
2996 info
.si_code
= TARGET_ILL_ILLOPN
;
2997 info
._sifields
._sigfault
._addr
= env
->pc
;
2998 queue_signal(env
, info
.si_signo
, &info
);
/* Native-syscall trap path: leave sim-syscall mode and dispatch.
 * NOTE(review): the do_syscall() argument list (orig. 3006-3015)
 * was dropped by the extraction. */
3002 ts
->sim_syscalls
= 0;
3005 env
->dregs
[0] = do_syscall(env
,
/* Asynchronous interrupt: nothing to do here, signals handled below. */
3016 case EXCP_INTERRUPT
:
3017 /* just indicate that signals should be handled asap */
/* MMU fault: deliver SIGSEGV with the faulting access address. */
3021 info
.si_signo
= SIGSEGV
;
3023 /* XXX: check env->error_code */
3024 info
.si_code
= TARGET_SEGV_MAPERR
;
3025 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3026 queue_signal(env
, info
.si_signo
, &info
);
/* Debug exception: let an attached gdb see SIGTRAP first; if it is
 * not consumed, queue it as a breakpoint trap for the guest. */
3033 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3036 info
.si_signo
= sig
;
3038 info
.si_code
= TARGET_TRAP_BRKPT
;
3039 queue_signal(env
, info
.si_signo
, &info
);
/* Default: unknown exception number -> dump state and abort. */
3044 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
3046 cpu_dump_state(cs
, stderr
, fprintf
, 0);
/* Bottom of the loop: flush any signals queued during this iteration. */
3049 process_pending_signals(env
);
3052 #endif /* TARGET_M68K */
3055 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3057 target_ulong addr
, val
, tmp
;
3058 target_siginfo_t info
;
3061 addr
= env
->lock_addr
;
3062 tmp
= env
->lock_st_addr
;
3063 env
->lock_addr
= -1;
3064 env
->lock_st_addr
= 0;
3070 if (quad ?
get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3074 if (val
== env
->lock_value
) {
3076 if (quad ?
put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3093 info
.si_signo
= TARGET_SIGSEGV
;
3095 info
.si_code
= TARGET_SEGV_MAPERR
;
3096 info
._sifields
._sigfault
._addr
= addr
;
3097 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3100 void cpu_loop(CPUAlphaState
*env
)
3102 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3104 target_siginfo_t info
;
3108 trapnr
= cpu_alpha_exec (env
);
3110 /* All of the traps imply a transition through PALcode, which
3111 implies an REI instruction has been executed. Which means
3112 that the intr_flag should be cleared. */
3117 fprintf(stderr
, "Reset requested. Exit\n");
3121 fprintf(stderr
, "Machine check exception. Exit\n");
3124 case EXCP_SMP_INTERRUPT
:
3125 case EXCP_CLK_INTERRUPT
:
3126 case EXCP_DEV_INTERRUPT
:
3127 fprintf(stderr
, "External interrupt. Exit\n");
3131 env
->lock_addr
= -1;
3132 info
.si_signo
= TARGET_SIGSEGV
;
3134 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3135 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3136 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3137 queue_signal(env
, info
.si_signo
, &info
);
3140 env
->lock_addr
= -1;
3141 info
.si_signo
= TARGET_SIGBUS
;
3143 info
.si_code
= TARGET_BUS_ADRALN
;
3144 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3145 queue_signal(env
, info
.si_signo
, &info
);
3149 env
->lock_addr
= -1;
3150 info
.si_signo
= TARGET_SIGILL
;
3152 info
.si_code
= TARGET_ILL_ILLOPC
;
3153 info
._sifields
._sigfault
._addr
= env
->pc
;
3154 queue_signal(env
, info
.si_signo
, &info
);
3157 env
->lock_addr
= -1;
3158 info
.si_signo
= TARGET_SIGFPE
;
3160 info
.si_code
= TARGET_FPE_FLTINV
;
3161 info
._sifields
._sigfault
._addr
= env
->pc
;
3162 queue_signal(env
, info
.si_signo
, &info
);
3165 /* No-op. Linux simply re-enables the FPU. */
3168 env
->lock_addr
= -1;
3169 switch (env
->error_code
) {
3172 info
.si_signo
= TARGET_SIGTRAP
;
3174 info
.si_code
= TARGET_TRAP_BRKPT
;
3175 info
._sifields
._sigfault
._addr
= env
->pc
;
3176 queue_signal(env
, info
.si_signo
, &info
);
3180 info
.si_signo
= TARGET_SIGTRAP
;
3183 info
._sifields
._sigfault
._addr
= env
->pc
;
3184 queue_signal(env
, info
.si_signo
, &info
);
3188 trapnr
= env
->ir
[IR_V0
];
3189 sysret
= do_syscall(env
, trapnr
,
3190 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3191 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3192 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3194 if (trapnr
== TARGET_NR_sigreturn
3195 || trapnr
== TARGET_NR_rt_sigreturn
) {
3198 /* Syscall writes 0 to V0 to bypass error check, similar
3199 to how this is handled internal to Linux kernel.
3200 (Ab)use trapnr temporarily as boolean indicating error. */
3201 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3202 env
->ir
[IR_V0
] = (trapnr ?
-sysret
: sysret
);
3203 env
->ir
[IR_A3
] = trapnr
;
3207 /* ??? We can probably elide the code using page_unprotect
3208 that is checking for self-modifying code. Instead we
3209 could simply call tb_flush here. Until we work out the
3210 changes required to turn off the extra write protection,
3211 this can be a no-op. */