/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
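
/* Illustrative note (added; not part of the original table): signal_init()
 * below fills in identity mappings for every host signal not listed here,
 * so e.g. SIGRTMIN+2 passes through unchanged, while __SIGRTMIN itself is
 * presented to the guest as __SIGRTMAX (and vice versa) to keep the host
 * libpthread's private RT signals out of the guest's numbering.
 */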
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
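
/* Worked example (added comment): target signal numbers are 1-based, so the
 * signum-- above makes signal 1 land in bit 0 of set->sig[0], and signal
 * TARGET_NSIG_BPW + 1 land in bit 0 of set->sig[1].  Without the decrement
 * every signal would be shifted by one bit relative to the kernel layout.
 */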
static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;

    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;

    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

/* Block all host signals for this thread; returns non-zero if a guest
 * signal was already pending, in which case the caller should restart
 * the syscall with -TARGET_ERESTARTSYS.
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;
    int pending;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    pending = atomic_xchg(&ts->signal_pending, 1);

    return pending;
}
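
/* Typical caller pattern (sketch added for clarity; see do_sigprocmask()
 * and do_sigaction() below for the real call sites):
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     ... update emulated signal state that must not race with a handler ...
 */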
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
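
/* Added note: the guessed si_type is packed into the top half of si_code
 * with deposit32(si_code, 16, 16, si_type); tswap_siginfo() recovers it via
 * extract32(si_code, 16, 16) and restores the guest-visible value with
 * sextract32(si_code, 0, 16), so only the low 16 bits ever reach the guest.
 */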
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}

/* XXX: we support only POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
/* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    /* Currently all callers define siginfo structures which
     * use the _sifields._sigfault union member, so we can
     * set the type here. If that changes we should push this
     * out so the si_type is passed in by callers.
     */
    info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
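
/* Usage sketch (added; the real call sites live in each target's cpu_loop(),
 * so treat the field values below as illustrative rather than authoritative):
 *
 *     target_siginfo_t si;
 *     si.si_signo = TARGET_SIGSEGV;
 *     si.si_errno = 0;
 *     si.si_code = TARGET_SEGV_MAPERR;
 *     si._sifields._sigfault._addr = fault_address;
 *     queue_signal(env, si.si_signo, &si);
 */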
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
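
/* Added summary of the delivery path: host_signal_handler() only records the
 * signal in ts->sigtab[] and kicks the CPU out of its execution loop; the
 * guest handler itself is invoked later, from process_pending_signals(),
 * via the per-target setup_frame()/setup_rt_frame() routines below.
 */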
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
#if defined(TARGET_I386) && TARGET_ABI_BITS == 32

/* from the Linux kernel */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC          0x0000

struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};

struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}

/*
 * Determine which stack to use..
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}
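
/* Added example: the final "& -8ul" rounds the frame address down to an
 * 8-byte boundary, e.g. esp = 0xbffff00c with frame_size = 0x2e0 gives
 * 0xbfffed2c, which rounds down to 0xbfffed28.
 */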
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}

/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
*env
, struct target_sigcontext
*sc
)
1086 unsigned int err
= 0;
1087 abi_ulong fpstate_addr
;
1088 unsigned int tmpflags
;
1090 cpu_x86_load_seg(env
, R_GS
, tswap16(sc
->gs
));
1091 cpu_x86_load_seg(env
, R_FS
, tswap16(sc
->fs
));
1092 cpu_x86_load_seg(env
, R_ES
, tswap16(sc
->es
));
1093 cpu_x86_load_seg(env
, R_DS
, tswap16(sc
->ds
));
1095 env
->regs
[R_EDI
] = tswapl(sc
->edi
);
1096 env
->regs
[R_ESI
] = tswapl(sc
->esi
);
1097 env
->regs
[R_EBP
] = tswapl(sc
->ebp
);
1098 env
->regs
[R_ESP
] = tswapl(sc
->esp
);
1099 env
->regs
[R_EBX
] = tswapl(sc
->ebx
);
1100 env
->regs
[R_EDX
] = tswapl(sc
->edx
);
1101 env
->regs
[R_ECX
] = tswapl(sc
->ecx
);
1102 env
->regs
[R_EAX
] = tswapl(sc
->eax
);
1103 env
->eip
= tswapl(sc
->eip
);
1105 cpu_x86_load_seg(env
, R_CS
, lduw_p(&sc
->cs
) | 3);
1106 cpu_x86_load_seg(env
, R_SS
, lduw_p(&sc
->ss
) | 3);
1108 tmpflags
= tswapl(sc
->eflags
);
1109 env
->eflags
= (env
->eflags
& ~0x40DD5) | (tmpflags
& 0x40DD5);
1110 // regs->orig_eax = -1; /* disable syscall checks */
1112 fpstate_addr
= tswapl(sc
->fpstate
);
1113 if (fpstate_addr
!= 0) {
1114 if (!access_ok(VERIFY_READ
, fpstate_addr
,
1115 sizeof(struct target_fpstate
)))
1117 cpu_x86_frstor(env
, fpstate_addr
, 1);
1125 long do_sigreturn(CPUX86State
*env
)
1127 struct sigframe
*frame
;
1128 abi_ulong frame_addr
= env
->regs
[R_ESP
] - 8;
1129 target_sigset_t target_set
;
1133 trace_user_do_sigreturn(env
, frame_addr
);
1134 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1))
1136 /* set blocked signals */
1137 __get_user(target_set
.sig
[0], &frame
->sc
.oldmask
);
1138 for(i
= 1; i
< TARGET_NSIG_WORDS
; i
++) {
1139 __get_user(target_set
.sig
[i
], &frame
->extramask
[i
- 1]);
1142 target_to_host_sigset_internal(&set
, &target_set
);
1145 /* restore registers */
1146 if (restore_sigcontext(env
, &frame
->sc
))
1148 unlock_user_struct(frame
, frame_addr
, 0);
1149 return -TARGET_QEMU_ESIGRETURN
;
1152 unlock_user_struct(frame
, frame_addr
, 0);
1153 force_sig(TARGET_SIGSEGV
);
1157 long do_rt_sigreturn(CPUX86State
*env
)
1159 abi_ulong frame_addr
;
1160 struct rt_sigframe
*frame
;
1163 frame_addr
= env
->regs
[R_ESP
] - 4;
1164 trace_user_do_rt_sigreturn(env
, frame_addr
);
1165 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1))
1167 target_to_host_sigset(&set
, &frame
->uc
.tuc_sigmask
);
1170 if (restore_sigcontext(env
, &frame
->uc
.tuc_mcontext
)) {
1174 if (do_sigaltstack(frame_addr
+ offsetof(struct rt_sigframe
, uc
.tuc_stack
), 0,
1175 get_sp_from_cpustate(env
)) == -EFAULT
) {
1179 unlock_user_struct(frame
, frame_addr
, 0);
1180 return -TARGET_QEMU_ESIGRETURN
;
1183 unlock_user_struct(frame
, frame_addr
, 0);
1184 force_sig(TARGET_SIGSEGV
);
#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
            &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}
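
/* Added note: "& ~15" keeps the AArch64 signal frame 16-byte aligned, as the
 * AAPCS64 stack constraints require; compare the 8-byte "& ~7" alignment
 * used by the 32-bit ARM get_sigframe() further down in this file.
 */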
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
                      &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
                      &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
                      &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
            offsetof(struct target_rt_sigframe, uc.tuc_stack),
            0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
#elif defined(TARGET_ARM)

struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;  /* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;  /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
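
/* Added note: retcodes[] is indexed in setup_return() below as
 * thumb + (SA_SIGINFO ? 2 : 0), so entries 0/1 are the sigreturn stubs and
 * entries 2/3 the rt_sigreturn stubs, each in ARM and Thumb encodings.
 */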
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}

static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}

static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong *)(vfpframe + 1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong *)(iwmmxtframe + 1);
}
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sig(TARGET_SIGSEGV);
}

static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sig(TARGET_SIGSEGV);
}

static void setup_frame(int usig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *regs)
{
    if (get_osversion() >= 0x020612) {
        setup_frame_v2(usig, ka, set, regs);
    } else {
        setup_frame_v1(usig, ka, set, regs);
    }
}
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sig(TARGET_SIGSEGV);
}

static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sig(TARGET_SIGSEGV);
}

static void setup_rt_frame(int usig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUARMState *env)
{
    if (get_osversion() >= 0x020612) {
        setup_rt_frame_v2(usig, ka, info, set, env);
    } else {
        setup_rt_frame_v1(usig, ka, info, set, env);
    }
}
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}

static long do_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v1 *frame = NULL;
    target_sigset_t set;
    sigset_t host_set;
    int i;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    __get_user(set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->sc)) {
        goto badframe;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV /* , current */);
    return -TARGET_QEMU_ESIGRETURN;
}
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong *)(vfpframe + 1);
}

static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
                                             abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;

    __get_user(magic, &iwmmxtframe->magic);
    __get_user(sz, &iwmmxtframe->size);
    if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
        return 0;
    }
    for (i = 0; i < 16; i++) {
        __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong *)(iwmmxtframe + 1);
}

static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        return 1;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
2063 static long do_sigreturn_v2(CPUARMState
*env
)
2065 abi_ulong frame_addr
;
2066 struct sigframe_v2
*frame
= NULL
;
2069 * Since we stacked the signal on a 64-bit boundary,
2070 * then 'sp' should be word aligned here. If it's
2071 * not, then the user is trying to mess with us.
2073 frame_addr
= env
->regs
[13];
2074 trace_user_do_sigreturn(env
, frame_addr
);
2075 if (frame_addr
& 7) {
2079 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1)) {
2083 if (do_sigframe_return_v2(env
, frame_addr
, &frame
->uc
)) {
2087 unlock_user_struct(frame
, frame_addr
, 0);
2088 return -TARGET_QEMU_ESIGRETURN
;
2091 unlock_user_struct(frame
, frame_addr
, 0);
2092 force_sig(TARGET_SIGSEGV
/* , current */);
2096 long do_sigreturn(CPUARMState
*env
)
2098 if (get_osversion() >= 0x020612) {
2099 return do_sigreturn_v2(env
);
2101 return do_sigreturn_v1(env
);
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr
                       + offsetof(struct rt_sigframe_v1, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return -TARGET_QEMU_ESIGRETURN;
}
static long do_rt_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUARMState *env)
{
    if (get_osversion() >= 0x020612) {
        return do_rt_sigreturn_v2(env);
    } else {
        return do_rt_sigreturn_v1(env);
    }
}
#elif defined(TARGET_SPARC)

#define __SUNOS_MAXWIN   31

/* This is what SunOS does, so shall I. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};

typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    }               si_regs;
    int             si_mask;
} __siginfo_t;

typedef struct {
    abi_ulong       si_float_regs[32];
    unsigned long   si_fsr;
    unsigned long   si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;

struct target_signal_frame {
    struct sparc_stackf ss;
    __siginfo_t         info;
    abi_ulong           fpu_save;
    abi_ulong           insns[2] __attribute__ ((aligned (8)));
    abi_ulong           extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong           extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t  fpu_state;
};
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    unsigned int        insns[2];
    unsigned int        extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t  fpu_state;
};
#define UREG_FP        UREG_I6
#define UREG_SP        UREG_O6

static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching.  */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        if (!on_sig_stack(sp)
                && !((target_sigaltstack_used.ss_sp
                      + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp
                 + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
static int
setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
{
    int err = 0, i;

    __put_user(env->psr, &si->si_regs.psr);
    __put_user(env->pc, &si->si_regs.pc);
    __put_user(env->npc, &si->si_regs.npc);
    __put_user(env->y, &si->si_regs.y);
    for (i = 0; i < 8; i++) {
        __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i + 8]);
    }
    __put_user(mask, &si->si_mask);
    return err;
}
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#define NF_ALIGNEDSZ  (((sizeof(struct target_signal_frame) + 7) & (~7)))

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer) {
        env->regwptr[UREG_I7] = ka->sa_restorer;
    } else {
        uint32_t val32;

        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sig(TARGET_SIGSEGV);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
long do_sigreturn(CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    uint32_t up_psr, pc, npc;
    target_sigset_t set;
    sigset_t host_set;
    int i;

    sf_addr = env->regwptr[UREG_FP];
    trace_user_do_sigreturn(env, sf_addr);
    if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
        goto segv_and_exit;
    }

    /* 1. Make sure we are not getting garbage from the user */

    if (sf_addr & 3)
        goto segv_and_exit;

    __get_user(pc, &sf->info.si_regs.pc);
    __get_user(npc, &sf->info.si_regs.npc);

    if ((pc | npc) & 3) {
        goto segv_and_exit;
    }

    /* 2. Restore the state */
    __get_user(up_psr, &sf->info.si_regs.psr);

    /* User can only change condition codes and FPU enabling in %psr. */
    env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
              | (env->psr & ~(PSR_ICC /* | PSR_EF */));

    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &sf->info.si_regs.y);
    for (i = 0; i < 8; i++) {
        __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
    }
    for (i = 0; i < 8; i++) {
        __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i + 8]);
    }

    /* FIXME: implement FPU save/restore:
     * __get_user(fpu_save, &sf->fpu_save);
     * if (fpu_save)
     *     err |= restore_fpu_state(env, fpu_save);
     */

    /* This is pretty much atomic, no amount locking would prevent
     * the races which exist anyways.
     */
    __get_user(set.sig[0], &sf->info.si_mask);
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &sf->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    unlock_user_struct(sf, sf_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

segv_and_exit:
    unlock_user_struct(sf, sf_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
#define MC_TSTATE 0
#define MC_PC 1
#define MC_NPC 2
#define MC_Y 3
#define MC_G1 4
#define MC_G2 5
#define MC_G3 6
#define MC_G4 7
#define MC_G5 8
#define MC_G6 9
#define MC_G7 10
#define MC_O0 11
#define MC_O1 12
#define MC_O2 13
#define MC_O3 14
#define MC_O4 15
#define MC_O5 16
#define MC_O6 17
#define MC_O7 18
#define MC_NGREG 19

typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];

struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};

struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;

typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;

struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};

/* A V9 register window */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

#define TARGET_STACK_BIAS 2047
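/* On SPARC V9 the ABI biases %sp/%fp: the register window save area lives
 * 2047 bytes above the raw register value, which is why TARGET_STACK_BIAS
 * is added before the window on the user stack is read or written below.
 */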
/* {set, get}context() needed for 64-bit SparcLinux userland. */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
        goto do_sigsegv;
    }
    grp = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[MC_PC]));
    __get_user(npc, &((*grp)[MC_NPC]));
    if ((pc | npc) & 3) {
        goto do_sigsegv;
    }
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        set_sigmask(&set);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[MC_Y]));
    __get_user(tstate, &((*grp)[MC_TSTATE]));
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[MC_G7]));
    __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    w_addr = TARGET_STACK_BIAS + env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *   __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i / 2].l.lower, src);
            } else {
                __get_user(env->fpr[i / 2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
void sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
        goto do_sigsegv;
    }

    mcp = &ucp->tuc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    /* If we're only reading the signal mask then do_sigprocmask()
     * is guaranteed not to fail, which is important because we don't
     * have any way to signal a failure or restart this operation since
     * this is not a normal syscall.
     */
    err = do_sigprocmask(0, NULL, &set);
    assert(err == 0);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        __put_user(target_set.sig[0],
                   (abi_ulong *)&ucp->tuc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->tuc_sigmask.sig;
        for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
            __put_user(*src, dst);
        }
    }

    /* XXX: tstate must be saved properly */
    //  __put_user(env->tstate, &((*grp)[MC_TSTATE]));
    __put_user(env->pc, &((*grp)[MC_PC]));
    __put_user(env->npc, &((*grp)[MC_NPC]));
    __put_user(env->y, &((*grp)[MC_Y]));
    __put_user(env->gregs[1], &((*grp)[MC_G1]));
    __put_user(env->gregs[2], &((*grp)[MC_G2]));
    __put_user(env->gregs[3], &((*grp)[MC_G3]));
    __put_user(env->gregs[4], &((*grp)[MC_G4]));
    __put_user(env->gregs[5], &((*grp)[MC_G5]));
    __put_user(env->gregs[6], &((*grp)[MC_G6]));
    __put_user(env->gregs[7], &((*grp)[MC_G7]));
    __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
    __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
    __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
    __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
    __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
    __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
    __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
    __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));

    w_addr = TARGET_STACK_BIAS + env->regwptr[UREG_I6];

    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    __put_user(fp, &(mcp->mc_fp));
    __put_user(i7, &(mcp->mc_i7));

    {
        uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, dst++) {
            if (i & 1) {
                __put_user(env->fpr[i / 2].l.lower, dst);
            } else {
                __put_user(env->fpr[i / 2].l.upper, dst);
            }
        }
    }
    __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
#elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)

# if defined(TARGET_ABI_MIPSO32)
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint64_t   sc_mdhi;
    uint64_t   sc_mdlo;
    target_ulong   sc_hi1;     /* Was sc_cause */
    target_ulong   sc_lo1;     /* Was sc_badvaddr */
    target_ulong   sc_hi2;     /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
# else /* N32 || N64 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
# endif /* O32 */

struct sigframe {
    uint32_t sf_ass[4];        /* argument save space for o32 */
    uint32_t sf_code[2];       /* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

struct target_rt_sigframe {
    uint32_t rs_ass[4];        /* argument save space for o32 */
    uint32_t rs_code[2];       /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
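/* The two words stored by install_sigtramp() below decode, per the MIPS32
 * encoding, to "addiu $v0, $zero, <nr>" (0x24020000 | nr, i.e. the
 * "li v0, nr" in the comment) followed by a bare "syscall" (0x0000000c);
 * together they form the sigreturn trampoline the handler returns through.
 */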
/* Install trampoline to jump back from signal handler */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     */

    __put_user(0x24020000 + syscall, tramp + 0);
    __put_user(0x0000000c          , tramp + 1);
    return 0;
}
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
/*
 * Determine which stack to use..
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
{
    unsigned long sp;

    /* Default to using normal stack */
    sp = regs->active_tc.gpr[29];

    /*
     * FPU emulator may have its own trampoline active just
     * above the user stack, 16-bytes before the next lowest
     * 16 byte boundary.  Try to avoid trashing it.
     */
    sp -= 32;

    /* This is the X/Open sanctioned signal stack switching.  */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    return (sp - frame_size) & ~7;
}
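/* On CPUs with MIPS16 or microMIPS, bit 0 of a jump target selects the
 * compressed ISA mode.  The helper below mirrors that convention by
 * copying the low bit of the new PC into MIPS_HFLAG_M16 and then clearing
 * it from the address itself.
 */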
static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
{
    if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
        env->hflags &= ~MIPS_HFLAG_M16;
        env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
        env->active_tc.PC &= ~(target_ulong) 1;
    }
}
# if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = 0 (should be cause)
     *   a2 = pointer to struct sigcontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sig(TARGET_SIGSEGV/*, current*/);
}
long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
        "move\t$29, %0\n\t"
        "j\tsyscall_exit"
        : /* no outputs */
        : "r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
     * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return -TARGET_QEMU_ESIGRETURN;
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = pointer to siginfo_t
     *   a2 = pointer to struct ucontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV/*, current*/);
}
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
     * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_SH4)

/*
 * code and data structures from linux kernel:
 * include/asm-sh/sigcontext.h
 * arch/sh/kernel/signal.c
 */

struct target_sigcontext {
    target_ulong  oldmask;

    /* CPU registers */
    target_ulong  sc_gregs[16];
    target_ulong  sc_pc;
    target_ulong  sc_pr;
    target_ulong  sc_sr;
    target_ulong  sc_gbr;
    target_ulong  sc_mach;
    target_ulong  sc_macl;

    /* FPU registers */
    target_ulong  sc_fpregs[16];
    target_ulong  sc_xfpregs[16];
    unsigned int  sc_fpscr;
    unsigned int  sc_fpul;
    unsigned int  sc_ownedfp;
};

struct target_sigframe
{
    struct target_sigcontext sc;
    target_ulong extramask[TARGET_NSIG_WORDS-1];
    uint16_t retcode[3];
};


struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
};

struct target_rt_sigframe
{
    struct target_siginfo info;
    struct target_ucontext uc;
    uint16_t retcode[3];
};


#define MOVW(n)  (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
#define TRAP_NOARG 0xc310         /* Syscall w/no args (NR in R3) SH3/4 */
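/* TRAP_NOARG encodes "trapa #0x10", the Linux/SH syscall trap that takes
 * the syscall number from R3, and MOVW(n) encodes a PC-relative mov.w
 * into R3; they are evidently the building blocks for the sigreturn
 * trampolines written into the retcode[] arrays above.
 */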
static abi_ulong get_sigframe(struct target_sigaction *ka,
                              unsigned long sp, size_t frame_size)
{
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    return (sp - frame_size) & -8ul;
}
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUSH4State *regs, unsigned long mask)
{
    int i;

#define COPY(x)         __put_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i = 0; i < 16; i++) {
        __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __put_user(regs->fpscr, &sc->sc_fpscr);
    __put_user(regs->fpul, &sc->sc_fpul);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
}
static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)