linux-user: Don't allow guest to block SIGSEGV
[qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <stdarg.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <assert.h>
26 #include <sys/ucontext.h>
27 #include <sys/resource.h>
28
29 #include "qemu.h"
30 #include "qemu-common.h"
31 #include "target_signal.h"
32
33 //#define DEBUG_SIGNAL
34
35 static struct target_sigaltstack target_sigaltstack_used = {
36 .ss_sp = 0,
37 .ss_size = 0,
38 .ss_flags = TARGET_SS_DISABLE,
39 };
40
41 static struct target_sigaction sigact_table[TARGET_NSIG];
42
43 static void host_signal_handler(int host_signum, siginfo_t *info,
44 void *puc);
45
46 static uint8_t host_to_target_signal_table[_NSIG] = {
47 [SIGHUP] = TARGET_SIGHUP,
48 [SIGINT] = TARGET_SIGINT,
49 [SIGQUIT] = TARGET_SIGQUIT,
50 [SIGILL] = TARGET_SIGILL,
51 [SIGTRAP] = TARGET_SIGTRAP,
52 [SIGABRT] = TARGET_SIGABRT,
53 /* [SIGIOT] = TARGET_SIGIOT,*/
54 [SIGBUS] = TARGET_SIGBUS,
55 [SIGFPE] = TARGET_SIGFPE,
56 [SIGKILL] = TARGET_SIGKILL,
57 [SIGUSR1] = TARGET_SIGUSR1,
58 [SIGSEGV] = TARGET_SIGSEGV,
59 [SIGUSR2] = TARGET_SIGUSR2,
60 [SIGPIPE] = TARGET_SIGPIPE,
61 [SIGALRM] = TARGET_SIGALRM,
62 [SIGTERM] = TARGET_SIGTERM,
63 #ifdef SIGSTKFLT
64 [SIGSTKFLT] = TARGET_SIGSTKFLT,
65 #endif
66 [SIGCHLD] = TARGET_SIGCHLD,
67 [SIGCONT] = TARGET_SIGCONT,
68 [SIGSTOP] = TARGET_SIGSTOP,
69 [SIGTSTP] = TARGET_SIGTSTP,
70 [SIGTTIN] = TARGET_SIGTTIN,
71 [SIGTTOU] = TARGET_SIGTTOU,
72 [SIGURG] = TARGET_SIGURG,
73 [SIGXCPU] = TARGET_SIGXCPU,
74 [SIGXFSZ] = TARGET_SIGXFSZ,
75 [SIGVTALRM] = TARGET_SIGVTALRM,
76 [SIGPROF] = TARGET_SIGPROF,
77 [SIGWINCH] = TARGET_SIGWINCH,
78 [SIGIO] = TARGET_SIGIO,
79 [SIGPWR] = TARGET_SIGPWR,
80 [SIGSYS] = TARGET_SIGSYS,
81 /* next signals stay the same */
82 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
83 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
84 To fix this properly we need to do manual signal delivery multiplexed
85 over a single host signal. */
86 [__SIGRTMIN] = __SIGRTMAX,
87 [__SIGRTMAX] = __SIGRTMIN,
88 };
89 static uint8_t target_to_host_signal_table[_NSIG];
90
91 static inline int on_sig_stack(unsigned long sp)
92 {
93 return (sp - target_sigaltstack_used.ss_sp
94 < target_sigaltstack_used.ss_size);
95 }
96
97 static inline int sas_ss_flags(unsigned long sp)
98 {
99 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
100 : on_sig_stack(sp) ? SS_ONSTACK : 0);
101 }
102
103 int host_to_target_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return host_to_target_signal_table[sig];
108 }
109
110 int target_to_host_signal(int sig)
111 {
112 if (sig < 0 || sig >= _NSIG)
113 return sig;
114 return target_to_host_signal_table[sig];
115 }
116
117 static inline void target_sigemptyset(target_sigset_t *set)
118 {
119 memset(set, 0, sizeof(*set));
120 }
121
122 static inline void target_sigaddset(target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 set->sig[signum / TARGET_NSIG_BPW] |= mask;
127 }
128
129 static inline int target_sigismember(const target_sigset_t *set, int signum)
130 {
131 signum--;
132 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
133 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
134 }
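/* For illustration: assuming TARGET_NSIG_BPW == 32, guest signal 35 is recorded
   as bit 2 of sig[1] (mask 0x4), since (35 - 1) / 32 == 1 and (35 - 1) % 32 == 2. */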
135
136 static void host_to_target_sigset_internal(target_sigset_t *d,
137 const sigset_t *s)
138 {
139 int i;
140 target_sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (sigismember(s, i)) {
143 target_sigaddset(d, host_to_target_signal(i));
144 }
145 }
146 }
147
148 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
149 {
150 target_sigset_t d1;
151 int i;
152
153 host_to_target_sigset_internal(&d1, s);
154 for(i = 0;i < TARGET_NSIG_WORDS; i++)
155 d->sig[i] = tswapal(d1.sig[i]);
156 }
157
158 static void target_to_host_sigset_internal(sigset_t *d,
159 const target_sigset_t *s)
160 {
161 int i;
162 sigemptyset(d);
163 for (i = 1; i <= TARGET_NSIG; i++) {
164 if (target_sigismember(s, i)) {
165 sigaddset(d, target_to_host_signal(i));
166 }
167 }
168 }
169
170 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
171 {
172 target_sigset_t s1;
173 int i;
174
175 for(i = 0;i < TARGET_NSIG_WORDS; i++)
176 s1.sig[i] = tswapal(s->sig[i]);
177 target_to_host_sigset_internal(d, &s1);
178 }
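/* Note: host_to_target_sigset() and target_to_host_sigset() additionally
   byte-swap each mask word with tswapal() because those sets are in guest
   memory representation; the *_internal variants above operate purely on
   host-endian words. */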
179
180 void host_to_target_old_sigset(abi_ulong *old_sigset,
181 const sigset_t *sigset)
182 {
183 target_sigset_t d;
184 host_to_target_sigset(&d, sigset);
185 *old_sigset = d.sig[0];
186 }
187
188 void target_to_host_old_sigset(sigset_t *sigset,
189 const abi_ulong *old_sigset)
190 {
191 target_sigset_t d;
192 int i;
193
194 d.sig[0] = *old_sigset;
195 for(i = 1;i < TARGET_NSIG_WORDS; i++)
196 d.sig[i] = 0;
197 target_to_host_sigset(sigset, &d);
198 }
199
200 /* Wrapper for the sigprocmask function
201  * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
202  * are host signal sets, not guest ones. This wraps the host sigprocmask calls
203  * that should be protected (calls originating from the guest).
204  */
205 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
206 {
207 int ret;
208 sigset_t val;
209 sigset_t *temp = NULL;
210 CPUState *cpu = thread_cpu;
211 TaskState *ts = (TaskState *)cpu->opaque;
212 bool segv_was_blocked = ts->sigsegv_blocked;
213
214 if (set) {
215 bool has_sigsegv = sigismember(set, SIGSEGV);
216 val = *set;
217 temp = &val;
218
219 sigdelset(temp, SIGSEGV);
220
221 switch (how) {
222 case SIG_BLOCK:
223 if (has_sigsegv) {
224 ts->sigsegv_blocked = true;
225 }
226 break;
227 case SIG_UNBLOCK:
228 if (has_sigsegv) {
229 ts->sigsegv_blocked = false;
230 }
231 break;
232 case SIG_SETMASK:
233 ts->sigsegv_blocked = has_sigsegv;
234 break;
235 default:
236 g_assert_not_reached();
237 }
238 }
239
240 ret = sigprocmask(how, temp, oldset);
241
242 if (oldset && segv_was_blocked) {
243 sigaddset(oldset, SIGSEGV);
244 }
245
246 return ret;
247 }
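/* For illustration (not part of the emulation path): a guest that tries to
 * block SIGSEGV, e.g.
 *
 *     sigset_t s;
 *     sigemptyset(&s);
 *     sigaddset(&s, SIGSEGV);
 *     do_sigprocmask(SIG_BLOCK, &s, NULL);
 *
 * never gets SIGSEGV added to the host signal mask (QEMU must keep catching
 * its own MMU faults); instead ts->sigsegv_blocked records the guest's
 * intent, and any mask reported back via oldset has SIGSEGV re-added so the
 * guest still sees it as blocked.
 */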
248
249 /* siginfo conversion */
250
251 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
252 const siginfo_t *info)
253 {
254 int sig = host_to_target_signal(info->si_signo);
255 tinfo->si_signo = sig;
256 tinfo->si_errno = 0;
257 tinfo->si_code = info->si_code;
258
259 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
260 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
261 /* Should never come here, but who knows. The information for
262 the target is irrelevant. */
263 tinfo->_sifields._sigfault._addr = 0;
264 } else if (sig == TARGET_SIGIO) {
265 tinfo->_sifields._sigpoll._band = info->si_band;
266 tinfo->_sifields._sigpoll._fd = info->si_fd;
267 } else if (sig == TARGET_SIGCHLD) {
268 tinfo->_sifields._sigchld._pid = info->si_pid;
269 tinfo->_sifields._sigchld._uid = info->si_uid;
270 tinfo->_sifields._sigchld._status
271 = host_to_target_waitstatus(info->si_status);
272 tinfo->_sifields._sigchld._utime = info->si_utime;
273 tinfo->_sifields._sigchld._stime = info->si_stime;
274 } else if (sig >= TARGET_SIGRTMIN) {
275 tinfo->_sifields._rt._pid = info->si_pid;
276 tinfo->_sifields._rt._uid = info->si_uid;
277 /* XXX: potential problem if 64 bit */
278 tinfo->_sifields._rt._sigval.sival_ptr
279 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
280 }
281 }
282
283 static void tswap_siginfo(target_siginfo_t *tinfo,
284 const target_siginfo_t *info)
285 {
286 int sig = info->si_signo;
287 tinfo->si_signo = tswap32(sig);
288 tinfo->si_errno = tswap32(info->si_errno);
289 tinfo->si_code = tswap32(info->si_code);
290
291 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
292 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
293 tinfo->_sifields._sigfault._addr
294 = tswapal(info->_sifields._sigfault._addr);
295 } else if (sig == TARGET_SIGIO) {
296 tinfo->_sifields._sigpoll._band
297 = tswap32(info->_sifields._sigpoll._band);
298 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
299 } else if (sig == TARGET_SIGCHLD) {
300 tinfo->_sifields._sigchld._pid
301 = tswap32(info->_sifields._sigchld._pid);
302 tinfo->_sifields._sigchld._uid
303 = tswap32(info->_sifields._sigchld._uid);
304 tinfo->_sifields._sigchld._status
305 = tswap32(info->_sifields._sigchld._status);
306 tinfo->_sifields._sigchld._utime
307 = tswapal(info->_sifields._sigchld._utime);
308 tinfo->_sifields._sigchld._stime
309 = tswapal(info->_sifields._sigchld._stime);
310 } else if (sig >= TARGET_SIGRTMIN) {
311 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
312 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
313 tinfo->_sifields._rt._sigval.sival_ptr
314 = tswapal(info->_sifields._rt._sigval.sival_ptr);
315 }
316 }
317
318
319 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
320 {
321 host_to_target_siginfo_noswap(tinfo, info);
322 tswap_siginfo(tinfo, tinfo);
323 }
324
325 /* XXX: we assume only POSIX RT signals are used. */
326 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
327 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
328 {
329 info->si_signo = tswap32(tinfo->si_signo);
330 info->si_errno = tswap32(tinfo->si_errno);
331 info->si_code = tswap32(tinfo->si_code);
332 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
333 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
334 info->si_value.sival_ptr =
335 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
336 }
337
338 static int fatal_signal (int sig)
339 {
340 switch (sig) {
341 case TARGET_SIGCHLD:
342 case TARGET_SIGURG:
343 case TARGET_SIGWINCH:
344 /* Ignored by default. */
345 return 0;
346 case TARGET_SIGCONT:
347 case TARGET_SIGSTOP:
348 case TARGET_SIGTSTP:
349 case TARGET_SIGTTIN:
350 case TARGET_SIGTTOU:
351 /* Job control signals. */
352 return 0;
353 default:
354 return 1;
355 }
356 }
357
358 /* returns 1 if given signal should dump core if not handled */
359 static int core_dump_signal(int sig)
360 {
361 switch (sig) {
362 case TARGET_SIGABRT:
363 case TARGET_SIGFPE:
364 case TARGET_SIGILL:
365 case TARGET_SIGQUIT:
366 case TARGET_SIGSEGV:
367 case TARGET_SIGTRAP:
368 case TARGET_SIGBUS:
369 return (1);
370 default:
371 return (0);
372 }
373 }
374
375 void signal_init(void)
376 {
377 struct sigaction act;
378 struct sigaction oact;
379 int i, j;
380 int host_sig;
381
382 /* generate signal conversion tables */
383 for(i = 1; i < _NSIG; i++) {
384 if (host_to_target_signal_table[i] == 0)
385 host_to_target_signal_table[i] = i;
386 }
387 for(i = 1; i < _NSIG; i++) {
388 j = host_to_target_signal_table[i];
389 target_to_host_signal_table[j] = i;
390 }
391
392 /* set all host signal handlers. ALL signals are blocked during
393 the handlers to serialize them. */
394 memset(sigact_table, 0, sizeof(sigact_table));
395
396 sigfillset(&act.sa_mask);
397 act.sa_flags = SA_SIGINFO;
398 act.sa_sigaction = host_signal_handler;
399 for(i = 1; i <= TARGET_NSIG; i++) {
400 host_sig = target_to_host_signal(i);
401 sigaction(host_sig, NULL, &oact);
402 if (oact.sa_sigaction == (void *)SIG_IGN) {
403 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
404 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
405 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
406 }
407 /* If there's already a handler installed then something has
408 gone horribly wrong, so don't even try to handle that case. */
409 /* Install some handlers for our own use. We need at least
410 SIGSEGV and SIGBUS, to detect exceptions. We can not just
411 trap all signals because it affects syscall interrupt
412 behavior. But do trap all default-fatal signals. */
413 if (fatal_signal (i))
414 sigaction(host_sig, &act, NULL);
415 }
416 }
417
418 /* signal queue handling */
419
420 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
421 {
422 CPUState *cpu = ENV_GET_CPU(env);
423 TaskState *ts = cpu->opaque;
424 struct sigqueue *q = ts->first_free;
425 if (!q)
426 return NULL;
427 ts->first_free = q->next;
428 return q;
429 }
430
431 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
432 {
433 CPUState *cpu = ENV_GET_CPU(env);
434 TaskState *ts = cpu->opaque;
435
436 q->next = ts->first_free;
437 ts->first_free = q;
438 }
439
440 /* abort execution with signal */
441 static void QEMU_NORETURN force_sig(int target_sig)
442 {
443 CPUState *cpu = thread_cpu;
444 CPUArchState *env = cpu->env_ptr;
445 TaskState *ts = (TaskState *)cpu->opaque;
446 int host_sig, core_dumped = 0;
447 struct sigaction act;
448 host_sig = target_to_host_signal(target_sig);
449 gdb_signalled(env, target_sig);
450
451 /* dump core if supported by target binary format */
452 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
453 stop_all_tasks();
454 core_dumped =
455 ((*ts->bprm->core_dump)(target_sig, env) == 0);
456 }
457 if (core_dumped) {
458 /* we already dumped the core of the target process, we don't want
459  * a coredump of qemu itself */
460 struct rlimit nodump;
461 getrlimit(RLIMIT_CORE, &nodump);
462 nodump.rlim_cur=0;
463 setrlimit(RLIMIT_CORE, &nodump);
464 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
465 target_sig, strsignal(host_sig), "core dumped" );
466 }
467
468 /* The proper exit code for dying from an uncaught signal is
469 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
470 * a negative value. To get the proper exit code we need to
471 * actually die from an uncaught signal. Here the default signal
472 * handler is installed, we send ourselves a signal and we wait for
473 * it to arrive. */
474 sigfillset(&act.sa_mask);
475 act.sa_handler = SIG_DFL;
476 act.sa_flags = 0;
477 sigaction(host_sig, &act, NULL);
478
479 /* For some reason raise(host_sig) doesn't send the signal when
480 * statically linked on x86-64. */
481 kill(getpid(), host_sig);
482
483 /* Make sure the signal isn't masked (just reuse the mask inside
484 of act) */
485 sigdelset(&act.sa_mask, host_sig);
486 sigsuspend(&act.sa_mask);
487
488 /* unreachable */
489 abort();
490 }
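/* As a consequence, a parent that waitpid()s on the emulator then sees
   WIFSIGNALED(status) with WTERMSIG(status) == host_sig, i.e. the same
   termination status the guest binary would have produced natively
   (assuming the guest and host signal numbers coincide, as they do for
   the common fatal signals). */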
491
492 /* queue a signal so that it will be sent to the virtual CPU as soon
493    as possible */
494 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
495 {
496 CPUState *cpu = ENV_GET_CPU(env);
497 TaskState *ts = cpu->opaque;
498 struct emulated_sigtable *k;
499 struct sigqueue *q, **pq;
500 abi_ulong handler;
501 int queue;
502
503 #if defined(DEBUG_SIGNAL)
504 fprintf(stderr, "queue_signal: sig=%d\n",
505 sig);
506 #endif
507 k = &ts->sigtab[sig - 1];
508 queue = gdb_queuesig ();
509 handler = sigact_table[sig - 1]._sa_handler;
510
511 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
512 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
513 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
514 * because it got a real MMU fault). A blocked SIGSEGV in that
515 * situation is treated as if using the default handler. This is
516 * not correct if some other process has randomly sent us a SIGSEGV
517 * via kill(), but that is not easy to distinguish at this point,
518 * so we assume it doesn't happen.
519 */
520 handler = TARGET_SIG_DFL;
521 }
522
523 if (!queue && handler == TARGET_SIG_DFL) {
524 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
525 kill(getpid(),SIGSTOP);
526 return 0;
527 } else
528 /* default handler: ignore some signals. The others are fatal */
529 if (sig != TARGET_SIGCHLD &&
530 sig != TARGET_SIGURG &&
531 sig != TARGET_SIGWINCH &&
532 sig != TARGET_SIGCONT) {
533 force_sig(sig);
534 } else {
535 return 0; /* indicate ignored */
536 }
537 } else if (!queue && handler == TARGET_SIG_IGN) {
538 /* ignore signal */
539 return 0;
540 } else if (!queue && handler == TARGET_SIG_ERR) {
541 force_sig(sig);
542 } else {
543 pq = &k->first;
544 if (sig < TARGET_SIGRTMIN) {
545 /* for a non-real-time signal, we queue exactly one signal */
546 if (!k->pending)
547 q = &k->info;
548 else
549 return 0;
550 } else {
551 if (!k->pending) {
552 /* first signal */
553 q = &k->info;
554 } else {
555 q = alloc_sigqueue(env);
556 if (!q)
557 return -EAGAIN;
558 while (*pq != NULL)
559 pq = &(*pq)->next;
560 }
561 }
562 *pq = q;
563 q->info = *info;
564 q->next = NULL;
565 k->pending = 1;
566 /* signal that a new signal is pending */
567 ts->signal_pending = 1;
568 return 1; /* indicates that the signal was queued */
569 }
570 }
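/* Return value convention for queue_signal(): 1 means the signal was queued
   and the caller should kick the CPU loop, 0 means it was ignored (or a
   non-RT signal of the same number is already pending), and -EAGAIN means
   the RT signal queue is full. Fatal unhandled signals never return; they
   go through force_sig() above. */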
571
572 static void host_signal_handler(int host_signum, siginfo_t *info,
573 void *puc)
574 {
575 CPUArchState *env = thread_cpu->env_ptr;
576 int sig;
577 target_siginfo_t tinfo;
578
579 /* the CPU emulator uses some host signals to detect exceptions;
580    we forward those signals to it */
581 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
582 && info->si_code > 0) {
583 if (cpu_signal_handler(host_signum, info, puc))
584 return;
585 }
586
587 /* get target signal number */
588 sig = host_to_target_signal(host_signum);
589 if (sig < 1 || sig > TARGET_NSIG)
590 return;
591 #if defined(DEBUG_SIGNAL)
592 fprintf(stderr, "qemu: got signal %d\n", sig);
593 #endif
594 host_to_target_siginfo_noswap(&tinfo, info);
595 if (queue_signal(env, sig, &tinfo) == 1) {
596 /* interrupt the virtual CPU as soon as possible */
597 cpu_exit(thread_cpu);
598 }
599 }
600
601 /* do_sigaltstack() returns target values and errnos. */
602 /* compare linux/kernel/signal.c:do_sigaltstack() */
603 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
604 {
605 int ret;
606 struct target_sigaltstack oss;
607
608 /* XXX: test errors */
609 if(uoss_addr)
610 {
611 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
612 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
613 __put_user(sas_ss_flags(sp), &oss.ss_flags);
614 }
615
616 if(uss_addr)
617 {
618 struct target_sigaltstack *uss;
619 struct target_sigaltstack ss;
620
621 ret = -TARGET_EFAULT;
622 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)
623 || __get_user(ss.ss_sp, &uss->ss_sp)
624 || __get_user(ss.ss_size, &uss->ss_size)
625 || __get_user(ss.ss_flags, &uss->ss_flags))
626 goto out;
627 unlock_user_struct(uss, uss_addr, 0);
628
629 ret = -TARGET_EPERM;
630 if (on_sig_stack(sp))
631 goto out;
632
633 ret = -TARGET_EINVAL;
634 if (ss.ss_flags != TARGET_SS_DISABLE
635 && ss.ss_flags != TARGET_SS_ONSTACK
636 && ss.ss_flags != 0)
637 goto out;
638
639 if (ss.ss_flags == TARGET_SS_DISABLE) {
640 ss.ss_size = 0;
641 ss.ss_sp = 0;
642 } else {
643 ret = -TARGET_ENOMEM;
644 if (ss.ss_size < MINSIGSTKSZ)
645 goto out;
646 }
647
648 target_sigaltstack_used.ss_sp = ss.ss_sp;
649 target_sigaltstack_used.ss_size = ss.ss_size;
650 }
651
652 if (uoss_addr) {
653 ret = -TARGET_EFAULT;
654 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
655 goto out;
656 }
657
658 ret = 0;
659 out:
660 return ret;
661 }
662
663 /* do_sigaction() returns host values and errnos */
664 int do_sigaction(int sig, const struct target_sigaction *act,
665 struct target_sigaction *oact)
666 {
667 struct target_sigaction *k;
668 struct sigaction act1;
669 int host_sig;
670 int ret = 0;
671
672 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
673 return -EINVAL;
674 k = &sigact_table[sig - 1];
675 #if defined(DEBUG_SIGNAL)
676 fprintf(stderr, "sigaction sig=%d act=0x%p, oact=0x%p\n",
677 sig, act, oact);
678 #endif
679 if (oact) {
680 __put_user(k->_sa_handler, &oact->_sa_handler);
681 __put_user(k->sa_flags, &oact->sa_flags);
682 #if !defined(TARGET_MIPS)
683 __put_user(k->sa_restorer, &oact->sa_restorer);
684 #endif
685 /* Not swapped. */
686 oact->sa_mask = k->sa_mask;
687 }
688 if (act) {
689 /* FIXME: This is not threadsafe. */
690 __get_user(k->_sa_handler, &act->_sa_handler);
691 __get_user(k->sa_flags, &act->sa_flags);
692 #if !defined(TARGET_MIPS)
693 __get_user(k->sa_restorer, &act->sa_restorer);
694 #endif
695 /* To be swapped in target_to_host_sigset. */
696 k->sa_mask = act->sa_mask;
697
698 /* we update the host linux signal state */
699 host_sig = target_to_host_signal(sig);
700 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
701 sigfillset(&act1.sa_mask);
702 act1.sa_flags = SA_SIGINFO;
703 if (k->sa_flags & TARGET_SA_RESTART)
704 act1.sa_flags |= SA_RESTART;
705 /* NOTE: it is important to update the host kernel signal
706    ignore state to avoid getting unexpectedly interrupted
707    syscalls */
708 if (k->_sa_handler == TARGET_SIG_IGN) {
709 act1.sa_sigaction = (void *)SIG_IGN;
710 } else if (k->_sa_handler == TARGET_SIG_DFL) {
711 if (fatal_signal (sig))
712 act1.sa_sigaction = host_signal_handler;
713 else
714 act1.sa_sigaction = (void *)SIG_DFL;
715 } else {
716 act1.sa_sigaction = host_signal_handler;
717 }
718 ret = sigaction(host_sig, &act1, NULL);
719 }
720 }
721 return ret;
722 }
723
724 static inline int copy_siginfo_to_user(target_siginfo_t *tinfo,
725 const target_siginfo_t *info)
726 {
727 tswap_siginfo(tinfo, info);
728 return 0;
729 }
730
731 static inline int current_exec_domain_sig(int sig)
732 {
733 return /* current->exec_domain && current->exec_domain->signal_invmap
734 && sig < 32 ? current->exec_domain->signal_invmap[sig] : */ sig;
735 }
736
737 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
738
739 /* from the Linux kernel */
740
741 struct target_fpreg {
742 uint16_t significand[4];
743 uint16_t exponent;
744 };
745
746 struct target_fpxreg {
747 uint16_t significand[4];
748 uint16_t exponent;
749 uint16_t padding[3];
750 };
751
752 struct target_xmmreg {
753 abi_ulong element[4];
754 };
755
756 struct target_fpstate {
757 /* Regular FPU environment */
758 abi_ulong cw;
759 abi_ulong sw;
760 abi_ulong tag;
761 abi_ulong ipoff;
762 abi_ulong cssel;
763 abi_ulong dataoff;
764 abi_ulong datasel;
765 struct target_fpreg _st[8];
766 uint16_t status;
767 uint16_t magic; /* 0xffff = regular FPU data only */
768
769 /* FXSR FPU environment */
770 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
771 abi_ulong mxcsr;
772 abi_ulong reserved;
773 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
774 struct target_xmmreg _xmm[8];
775 abi_ulong padding[56];
776 };
777
778 #define X86_FXSR_MAGIC 0x0000
779
780 struct target_sigcontext {
781 uint16_t gs, __gsh;
782 uint16_t fs, __fsh;
783 uint16_t es, __esh;
784 uint16_t ds, __dsh;
785 abi_ulong edi;
786 abi_ulong esi;
787 abi_ulong ebp;
788 abi_ulong esp;
789 abi_ulong ebx;
790 abi_ulong edx;
791 abi_ulong ecx;
792 abi_ulong eax;
793 abi_ulong trapno;
794 abi_ulong err;
795 abi_ulong eip;
796 uint16_t cs, __csh;
797 abi_ulong eflags;
798 abi_ulong esp_at_signal;
799 uint16_t ss, __ssh;
800 abi_ulong fpstate; /* pointer */
801 abi_ulong oldmask;
802 abi_ulong cr2;
803 };
804
805 struct target_ucontext {
806 abi_ulong tuc_flags;
807 abi_ulong tuc_link;
808 target_stack_t tuc_stack;
809 struct target_sigcontext tuc_mcontext;
810 target_sigset_t tuc_sigmask; /* mask last for extensibility */
811 };
812
813 struct sigframe
814 {
815 abi_ulong pretcode;
816 int sig;
817 struct target_sigcontext sc;
818 struct target_fpstate fpstate;
819 abi_ulong extramask[TARGET_NSIG_WORDS-1];
820 char retcode[8];
821 };
822
823 struct rt_sigframe
824 {
825 abi_ulong pretcode;
826 int sig;
827 abi_ulong pinfo;
828 abi_ulong puc;
829 struct target_siginfo info;
830 struct target_ucontext uc;
831 struct target_fpstate fpstate;
832 char retcode[8];
833 };
834
835 /*
836 * Set up a signal frame.
837 */
838
839 /* XXX: save x87 state */
840 static int
841 setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
842 CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr)
843 {
844 CPUState *cs = CPU(x86_env_get_cpu(env));
845 int err = 0;
846 uint16_t magic;
847
848 /* already locked in setup_frame() */
849 err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
850 err |= __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
851 err |= __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
852 err |= __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
853 err |= __put_user(env->regs[R_EDI], &sc->edi);
854 err |= __put_user(env->regs[R_ESI], &sc->esi);
855 err |= __put_user(env->regs[R_EBP], &sc->ebp);
856 err |= __put_user(env->regs[R_ESP], &sc->esp);
857 err |= __put_user(env->regs[R_EBX], &sc->ebx);
858 err |= __put_user(env->regs[R_EDX], &sc->edx);
859 err |= __put_user(env->regs[R_ECX], &sc->ecx);
860 err |= __put_user(env->regs[R_EAX], &sc->eax);
861 err |= __put_user(cs->exception_index, &sc->trapno);
862 err |= __put_user(env->error_code, &sc->err);
863 err |= __put_user(env->eip, &sc->eip);
864 err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
865 err |= __put_user(env->eflags, &sc->eflags);
866 err |= __put_user(env->regs[R_ESP], &sc->esp_at_signal);
867 err |= __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
868
869 cpu_x86_fsave(env, fpstate_addr, 1);
870 fpstate->status = fpstate->sw;
871 magic = 0xffff;
872 err |= __put_user(magic, &fpstate->magic);
873 err |= __put_user(fpstate_addr, &sc->fpstate);
874
875 /* non-iBCS2 extensions.. */
876 err |= __put_user(mask, &sc->oldmask);
877 err |= __put_user(env->cr[2], &sc->cr2);
878 return err;
879 }
880
881 /*
882 * Determine which stack to use..
883 */
884
885 static inline abi_ulong
886 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
887 {
888 unsigned long esp;
889
890 /* Default to using normal stack */
891 esp = env->regs[R_ESP];
892 /* This is the X/Open sanctioned signal stack switching. */
893 if (ka->sa_flags & TARGET_SA_ONSTACK) {
894 if (sas_ss_flags(esp) == 0)
895 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
896 }
897
898 /* This is the legacy signal stack switching. */
899 else
900 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
901 !(ka->sa_flags & TARGET_SA_RESTORER) &&
902 ka->sa_restorer) {
903 esp = (unsigned long) ka->sa_restorer;
904 }
905 return (esp - frame_size) & -8ul;
906 }
907
908 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
909 static void setup_frame(int sig, struct target_sigaction *ka,
910 target_sigset_t *set, CPUX86State *env)
911 {
912 abi_ulong frame_addr;
913 struct sigframe *frame;
914 int i, err = 0;
915
916 frame_addr = get_sigframe(ka, env, sizeof(*frame));
917
918 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
919 goto give_sigsegv;
920
921 err |= __put_user(current_exec_domain_sig(sig),
922 &frame->sig);
923 if (err)
924 goto give_sigsegv;
925
926 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
927 frame_addr + offsetof(struct sigframe, fpstate));
928 if (err)
929 goto give_sigsegv;
930
931 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
932 if (__put_user(set->sig[i], &frame->extramask[i - 1]))
933 goto give_sigsegv;
934 }
935
936 /* Set up to return from userspace. If provided, use a stub
937 already in userspace. */
938 if (ka->sa_flags & TARGET_SA_RESTORER) {
939 err |= __put_user(ka->sa_restorer, &frame->pretcode);
940 } else {
941 uint16_t val16;
942 abi_ulong retcode_addr;
943 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
944 err |= __put_user(retcode_addr, &frame->pretcode);
945 /* This is popl %eax ; movl $,%eax ; int $0x80 */
946 val16 = 0xb858;
947 err |= __put_user(val16, (uint16_t *)(frame->retcode+0));
948 err |= __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
949 val16 = 0x80cd;
950 err |= __put_user(val16, (uint16_t *)(frame->retcode+6));
951 }
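/* The retcode stub written above decodes (little-endian stores) to:
 *   0x58            popl %eax
 *   0xb8 imm32      movl $TARGET_NR_sigreturn,%eax
 *   0xcd 0x80       int  $0x80
 */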
952
953 if (err)
954 goto give_sigsegv;
955
956 /* Set up registers for signal handler */
957 env->regs[R_ESP] = frame_addr;
958 env->eip = ka->_sa_handler;
959
960 cpu_x86_load_seg(env, R_DS, __USER_DS);
961 cpu_x86_load_seg(env, R_ES, __USER_DS);
962 cpu_x86_load_seg(env, R_SS, __USER_DS);
963 cpu_x86_load_seg(env, R_CS, __USER_CS);
964 env->eflags &= ~TF_MASK;
965
966 unlock_user_struct(frame, frame_addr, 1);
967
968 return;
969
970 give_sigsegv:
971 unlock_user_struct(frame, frame_addr, 1);
972 if (sig == TARGET_SIGSEGV)
973 ka->_sa_handler = TARGET_SIG_DFL;
974 force_sig(TARGET_SIGSEGV /* , current */);
975 }
976
977 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
978 static void setup_rt_frame(int sig, struct target_sigaction *ka,
979 target_siginfo_t *info,
980 target_sigset_t *set, CPUX86State *env)
981 {
982 abi_ulong frame_addr, addr;
983 struct rt_sigframe *frame;
984 int i, err = 0;
985
986 frame_addr = get_sigframe(ka, env, sizeof(*frame));
987
988 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
989 goto give_sigsegv;
990
991 err |= __put_user(current_exec_domain_sig(sig),
992 &frame->sig);
993 addr = frame_addr + offsetof(struct rt_sigframe, info);
994 err |= __put_user(addr, &frame->pinfo);
995 addr = frame_addr + offsetof(struct rt_sigframe, uc);
996 err |= __put_user(addr, &frame->puc);
997 err |= copy_siginfo_to_user(&frame->info, info);
998 if (err)
999 goto give_sigsegv;
1000
1001 /* Create the ucontext. */
1002 err |= __put_user(0, &frame->uc.tuc_flags);
1003 err |= __put_user(0, &frame->uc.tuc_link);
1004 err |= __put_user(target_sigaltstack_used.ss_sp,
1005 &frame->uc.tuc_stack.ss_sp);
1006 err |= __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1007 &frame->uc.tuc_stack.ss_flags);
1008 err |= __put_user(target_sigaltstack_used.ss_size,
1009 &frame->uc.tuc_stack.ss_size);
1010 err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate,
1011 env, set->sig[0],
1012 frame_addr + offsetof(struct rt_sigframe, fpstate));
1013 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1014 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
1015 goto give_sigsegv;
1016 }
1017
1018 /* Set up to return from userspace. If provided, use a stub
1019 already in userspace. */
1020 if (ka->sa_flags & TARGET_SA_RESTORER) {
1021 err |= __put_user(ka->sa_restorer, &frame->pretcode);
1022 } else {
1023 uint16_t val16;
1024 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1025 err |= __put_user(addr, &frame->pretcode);
1026 /* This is movl $,%eax ; int $0x80 */
1027 err |= __put_user(0xb8, (char *)(frame->retcode+0));
1028 err |= __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1029 val16 = 0x80cd;
1030 err |= __put_user(val16, (uint16_t *)(frame->retcode+5));
1031 }
1032
1033 if (err)
1034 goto give_sigsegv;
1035
1036 /* Set up registers for signal handler */
1037 env->regs[R_ESP] = frame_addr;
1038 env->eip = ka->_sa_handler;
1039
1040 cpu_x86_load_seg(env, R_DS, __USER_DS);
1041 cpu_x86_load_seg(env, R_ES, __USER_DS);
1042 cpu_x86_load_seg(env, R_SS, __USER_DS);
1043 cpu_x86_load_seg(env, R_CS, __USER_CS);
1044 env->eflags &= ~TF_MASK;
1045
1046 unlock_user_struct(frame, frame_addr, 1);
1047
1048 return;
1049
1050 give_sigsegv:
1051 unlock_user_struct(frame, frame_addr, 1);
1052 if (sig == TARGET_SIGSEGV)
1053 ka->_sa_handler = TARGET_SIG_DFL;
1054 force_sig(TARGET_SIGSEGV /* , current */);
1055 }
1056
1057 static int
1058 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
1059 {
1060 unsigned int err = 0;
1061 abi_ulong fpstate_addr;
1062 unsigned int tmpflags;
1063
1064 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1065 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1066 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1067 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1068
1069 env->regs[R_EDI] = tswapl(sc->edi);
1070 env->regs[R_ESI] = tswapl(sc->esi);
1071 env->regs[R_EBP] = tswapl(sc->ebp);
1072 env->regs[R_ESP] = tswapl(sc->esp);
1073 env->regs[R_EBX] = tswapl(sc->ebx);
1074 env->regs[R_EDX] = tswapl(sc->edx);
1075 env->regs[R_ECX] = tswapl(sc->ecx);
1076 env->eip = tswapl(sc->eip);
1077
1078 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1079 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1080
1081 tmpflags = tswapl(sc->eflags);
1082 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1083 // regs->orig_eax = -1; /* disable syscall checks */
1084
1085 fpstate_addr = tswapl(sc->fpstate);
1086 if (fpstate_addr != 0) {
1087 if (!access_ok(VERIFY_READ, fpstate_addr,
1088 sizeof(struct target_fpstate)))
1089 goto badframe;
1090 cpu_x86_frstor(env, fpstate_addr, 1);
1091 }
1092
1093 *peax = tswapl(sc->eax);
1094 return err;
1095 badframe:
1096 return 1;
1097 }
1098
1099 long do_sigreturn(CPUX86State *env)
1100 {
1101 struct sigframe *frame;
1102 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1103 target_sigset_t target_set;
1104 sigset_t set;
1105 int eax, i;
1106
1107 #if defined(DEBUG_SIGNAL)
1108 fprintf(stderr, "do_sigreturn\n");
1109 #endif
1110 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1111 goto badframe;
1112 /* set blocked signals */
1113 if (__get_user(target_set.sig[0], &frame->sc.oldmask))
1114 goto badframe;
1115 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1116 if (__get_user(target_set.sig[i], &frame->extramask[i - 1]))
1117 goto badframe;
1118 }
1119
1120 target_to_host_sigset_internal(&set, &target_set);
1121 do_sigprocmask(SIG_SETMASK, &set, NULL);
1122
1123 /* restore registers */
1124 if (restore_sigcontext(env, &frame->sc, &eax))
1125 goto badframe;
1126 unlock_user_struct(frame, frame_addr, 0);
1127 return eax;
1128
1129 badframe:
1130 unlock_user_struct(frame, frame_addr, 0);
1131 force_sig(TARGET_SIGSEGV);
1132 return 0;
1133 }
1134
1135 long do_rt_sigreturn(CPUX86State *env)
1136 {
1137 abi_ulong frame_addr;
1138 struct rt_sigframe *frame;
1139 sigset_t set;
1140 int eax;
1141
1142 frame_addr = env->regs[R_ESP] - 4;
1143 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1144 goto badframe;
1145 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1146 do_sigprocmask(SIG_SETMASK, &set, NULL);
1147
1148 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
1149 goto badframe;
1150
1151 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1152 get_sp_from_cpustate(env)) == -EFAULT)
1153 goto badframe;
1154
1155 unlock_user_struct(frame, frame_addr, 0);
1156 return eax;
1157
1158 badframe:
1159 unlock_user_struct(frame, frame_addr, 0);
1160 force_sig(TARGET_SIGSEGV);
1161 return 0;
1162 }
1163
1164 #elif defined(TARGET_AARCH64)
1165
1166 struct target_sigcontext {
1167 uint64_t fault_address;
1168 /* AArch64 registers */
1169 uint64_t regs[31];
1170 uint64_t sp;
1171 uint64_t pc;
1172 uint64_t pstate;
1173 /* 4K reserved for FP/SIMD state and future expansion */
1174 char __reserved[4096] __attribute__((__aligned__(16)));
1175 };
1176
1177 struct target_ucontext {
1178 abi_ulong tuc_flags;
1179 abi_ulong tuc_link;
1180 target_stack_t tuc_stack;
1181 target_sigset_t tuc_sigmask;
1182 /* glibc uses a 1024-bit sigset_t */
1183 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1184 /* last for future expansion */
1185 struct target_sigcontext tuc_mcontext;
1186 };
1187
1188 /*
1189 * Header to be used at the beginning of structures extending the user
1190 * context. Such structures must be placed after the rt_sigframe on the stack
1191 * and be 16-byte aligned. The last structure must be a dummy one with the
1192 * magic and size set to 0.
1193 */
1194 struct target_aarch64_ctx {
1195 uint32_t magic;
1196 uint32_t size;
1197 };
1198
1199 #define TARGET_FPSIMD_MAGIC 0x46508001
1200
1201 struct target_fpsimd_context {
1202 struct target_aarch64_ctx head;
1203 uint32_t fpsr;
1204 uint32_t fpcr;
1205 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1206 };
1207
1208 /*
1209 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1210 * user space as it will change with the addition of new context. User space
1211 * should check the magic/size information.
1212 */
1213 struct target_aux_context {
1214 struct target_fpsimd_context fpsimd;
1215 /* additional context to be added before "end" */
1216 struct target_aarch64_ctx end;
1217 };
1218
1219 struct target_rt_sigframe {
1220 struct target_siginfo info;
1221 struct target_ucontext uc;
1222 uint64_t fp;
1223 uint64_t lr;
1224 uint32_t tramp[2];
1225 };
1226
1227 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1228 CPUARMState *env, target_sigset_t *set)
1229 {
1230 int i;
1231 struct target_aux_context *aux =
1232 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1233
1234 /* set up the stack frame for unwinding */
1235 __put_user(env->xregs[29], &sf->fp);
1236 __put_user(env->xregs[30], &sf->lr);
1237
1238 for (i = 0; i < 31; i++) {
1239 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1240 }
1241 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1242 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1243 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1244
1245 __put_user(/*current->thread.fault_address*/ 0,
1246 &sf->uc.tuc_mcontext.fault_address);
1247
1248 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1249 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1250 }
1251
1252 for (i = 0; i < 32; i++) {
1253 #ifdef TARGET_WORDS_BIGENDIAN
1254 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1255 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1256 #else
1257 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1258 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1259 #endif
1260 }
1261 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1262 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1263 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1264 __put_user(sizeof(struct target_fpsimd_context),
1265 &aux->fpsimd.head.size);
1266
1267 /* set the "end" magic */
1268 __put_user(0, &aux->end.magic);
1269 __put_user(0, &aux->end.size);
1270
1271 return 0;
1272 }
1273
1274 static int target_restore_sigframe(CPUARMState *env,
1275 struct target_rt_sigframe *sf)
1276 {
1277 sigset_t set;
1278 int i;
1279 struct target_aux_context *aux =
1280 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1281 uint32_t magic, size, fpsr, fpcr;
1282 uint64_t pstate;
1283
1284 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1285 do_sigprocmask(SIG_SETMASK, &set, NULL);
1286
1287 for (i = 0; i < 31; i++) {
1288 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1289 }
1290
1291 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1292 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1293 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1294 pstate_write(env, pstate);
1295
1296 __get_user(magic, &aux->fpsimd.head.magic);
1297 __get_user(size, &aux->fpsimd.head.size);
1298
1299 if (magic != TARGET_FPSIMD_MAGIC
1300 || size != sizeof(struct target_fpsimd_context)) {
1301 return 1;
1302 }
1303
1304 for (i = 0; i < 32; i++) {
1305 #ifdef TARGET_WORDS_BIGENDIAN
1306 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1307 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1308 #else
1309 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1310 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1311 #endif
1312 }
1313 __get_user(fpsr, &aux->fpsimd.fpsr);
1314 vfp_set_fpsr(env, fpsr);
1315 __get_user(fpcr, &aux->fpsimd.fpcr);
1316 vfp_set_fpcr(env, fpcr);
1317
1318 return 0;
1319 }
1320
1321 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1322 {
1323 abi_ulong sp;
1324
1325 sp = env->xregs[31];
1326
1327 /*
1328 * This is the X/Open sanctioned signal stack switching.
1329 */
1330 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
1331 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1332 }
1333
1334 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1335
1336 return sp;
1337 }
1338
1339 static void target_setup_frame(int usig, struct target_sigaction *ka,
1340 target_siginfo_t *info, target_sigset_t *set,
1341 CPUARMState *env)
1342 {
1343 struct target_rt_sigframe *frame;
1344 abi_ulong frame_addr, return_addr;
1345
1346 frame_addr = get_sigframe(ka, env);
1347 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1348 goto give_sigsegv;
1349 }
1350
1351 __put_user(0, &frame->uc.tuc_flags);
1352 __put_user(0, &frame->uc.tuc_link);
1353
1354 __put_user(target_sigaltstack_used.ss_sp,
1355 &frame->uc.tuc_stack.ss_sp);
1356 __put_user(sas_ss_flags(env->xregs[31]),
1357 &frame->uc.tuc_stack.ss_flags);
1358 __put_user(target_sigaltstack_used.ss_size,
1359 &frame->uc.tuc_stack.ss_size);
1360 target_setup_sigframe(frame, env, set);
1361 if (ka->sa_flags & TARGET_SA_RESTORER) {
1362 return_addr = ka->sa_restorer;
1363 } else {
1364 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1365 __put_user(0xd2801168, &frame->tramp[0]);
1366 __put_user(0xd4000001, &frame->tramp[1]);
1367 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1368 }
1369 env->xregs[0] = usig;
1370 env->xregs[31] = frame_addr;
1371 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1372 env->pc = ka->_sa_handler;
1373 env->xregs[30] = return_addr;
1374 if (info) {
1375 if (copy_siginfo_to_user(&frame->info, info)) {
1376 goto give_sigsegv;
1377 }
1378 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1379 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1380 }
1381
1382 unlock_user_struct(frame, frame_addr, 1);
1383 return;
1384
1385 give_sigsegv:
1386 unlock_user_struct(frame, frame_addr, 1);
1387 force_sig(TARGET_SIGSEGV);
1388 }
1389
1390 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1391 target_siginfo_t *info, target_sigset_t *set,
1392 CPUARMState *env)
1393 {
1394 target_setup_frame(sig, ka, info, set, env);
1395 }
1396
1397 static void setup_frame(int sig, struct target_sigaction *ka,
1398 target_sigset_t *set, CPUARMState *env)
1399 {
1400 target_setup_frame(sig, ka, 0, set, env);
1401 }
1402
1403 long do_rt_sigreturn(CPUARMState *env)
1404 {
1405 struct target_rt_sigframe *frame = NULL;
1406 abi_ulong frame_addr = env->xregs[31];
1407
1408 if (frame_addr & 15) {
1409 goto badframe;
1410 }
1411
1412 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1413 goto badframe;
1414 }
1415
1416 if (target_restore_sigframe(env, frame)) {
1417 goto badframe;
1418 }
1419
1420 if (do_sigaltstack(frame_addr +
1421 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1422 0, get_sp_from_cpustate(env)) == -EFAULT) {
1423 goto badframe;
1424 }
1425
1426 unlock_user_struct(frame, frame_addr, 0);
1427 return env->xregs[0];
1428
1429 badframe:
1430 unlock_user_struct(frame, frame_addr, 0);
1431 force_sig(TARGET_SIGSEGV);
1432 return 0;
1433 }
1434
1435 long do_sigreturn(CPUARMState *env)
1436 {
1437 return do_rt_sigreturn(env);
1438 }
1439
1440 #elif defined(TARGET_ARM)
1441
1442 struct target_sigcontext {
1443 abi_ulong trap_no;
1444 abi_ulong error_code;
1445 abi_ulong oldmask;
1446 abi_ulong arm_r0;
1447 abi_ulong arm_r1;
1448 abi_ulong arm_r2;
1449 abi_ulong arm_r3;
1450 abi_ulong arm_r4;
1451 abi_ulong arm_r5;
1452 abi_ulong arm_r6;
1453 abi_ulong arm_r7;
1454 abi_ulong arm_r8;
1455 abi_ulong arm_r9;
1456 abi_ulong arm_r10;
1457 abi_ulong arm_fp;
1458 abi_ulong arm_ip;
1459 abi_ulong arm_sp;
1460 abi_ulong arm_lr;
1461 abi_ulong arm_pc;
1462 abi_ulong arm_cpsr;
1463 abi_ulong fault_address;
1464 };
1465
1466 struct target_ucontext_v1 {
1467 abi_ulong tuc_flags;
1468 abi_ulong tuc_link;
1469 target_stack_t tuc_stack;
1470 struct target_sigcontext tuc_mcontext;
1471 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1472 };
1473
1474 struct target_ucontext_v2 {
1475 abi_ulong tuc_flags;
1476 abi_ulong tuc_link;
1477 target_stack_t tuc_stack;
1478 struct target_sigcontext tuc_mcontext;
1479 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1480 char __unused[128 - sizeof(target_sigset_t)];
1481 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1482 };
1483
1484 struct target_user_vfp {
1485 uint64_t fpregs[32];
1486 abi_ulong fpscr;
1487 };
1488
1489 struct target_user_vfp_exc {
1490 abi_ulong fpexc;
1491 abi_ulong fpinst;
1492 abi_ulong fpinst2;
1493 };
1494
1495 struct target_vfp_sigframe {
1496 abi_ulong magic;
1497 abi_ulong size;
1498 struct target_user_vfp ufp;
1499 struct target_user_vfp_exc ufp_exc;
1500 } __attribute__((__aligned__(8)));
1501
1502 struct target_iwmmxt_sigframe {
1503 abi_ulong magic;
1504 abi_ulong size;
1505 uint64_t regs[16];
1506 /* Note that not all the coprocessor control registers are stored here */
1507 uint32_t wcssf;
1508 uint32_t wcasf;
1509 uint32_t wcgr0;
1510 uint32_t wcgr1;
1511 uint32_t wcgr2;
1512 uint32_t wcgr3;
1513 } __attribute__((__aligned__(8)));
1514
1515 #define TARGET_VFP_MAGIC 0x56465001
1516 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1517
1518 struct sigframe_v1
1519 {
1520 struct target_sigcontext sc;
1521 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1522 abi_ulong retcode;
1523 };
1524
1525 struct sigframe_v2
1526 {
1527 struct target_ucontext_v2 uc;
1528 abi_ulong retcode;
1529 };
1530
1531 struct rt_sigframe_v1
1532 {
1533 abi_ulong pinfo;
1534 abi_ulong puc;
1535 struct target_siginfo info;
1536 struct target_ucontext_v1 uc;
1537 abi_ulong retcode;
1538 };
1539
1540 struct rt_sigframe_v2
1541 {
1542 struct target_siginfo info;
1543 struct target_ucontext_v2 uc;
1544 abi_ulong retcode;
1545 };
1546
1547 #define TARGET_CONFIG_CPU_32 1
1548
1549 /*
1550 * For ARM syscalls, we encode the syscall number into the instruction.
1551 */
1552 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1553 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1554
1555 /*
1556 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1557 * need two 16-bit instructions.
1558 */
1559 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1560 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1561
1562 static const abi_ulong retcodes[4] = {
1563 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1564 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1565 };
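/* setup_return() below indexes this array as (thumb ? 1 : 0) +
   (SA_SIGINFO handler ? 2 : 0), so even slots hold the ARM encodings and
   odd slots the Thumb encodings, with the rt variants in the upper pair. */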
1566
1567
1568 #define __get_user_error(x,p,e) __get_user(x, p)
1569
1570 static inline int valid_user_regs(CPUARMState *regs)
1571 {
1572 return 1;
1573 }
1574
1575 static void
1576 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1577 CPUARMState *env, abi_ulong mask)
1578 {
1579 __put_user(env->regs[0], &sc->arm_r0);
1580 __put_user(env->regs[1], &sc->arm_r1);
1581 __put_user(env->regs[2], &sc->arm_r2);
1582 __put_user(env->regs[3], &sc->arm_r3);
1583 __put_user(env->regs[4], &sc->arm_r4);
1584 __put_user(env->regs[5], &sc->arm_r5);
1585 __put_user(env->regs[6], &sc->arm_r6);
1586 __put_user(env->regs[7], &sc->arm_r7);
1587 __put_user(env->regs[8], &sc->arm_r8);
1588 __put_user(env->regs[9], &sc->arm_r9);
1589 __put_user(env->regs[10], &sc->arm_r10);
1590 __put_user(env->regs[11], &sc->arm_fp);
1591 __put_user(env->regs[12], &sc->arm_ip);
1592 __put_user(env->regs[13], &sc->arm_sp);
1593 __put_user(env->regs[14], &sc->arm_lr);
1594 __put_user(env->regs[15], &sc->arm_pc);
1595 #ifdef TARGET_CONFIG_CPU_32
1596 __put_user(cpsr_read(env), &sc->arm_cpsr);
1597 #endif
1598
1599 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1600 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1601 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1602 __put_user(mask, &sc->oldmask);
1603 }
1604
1605 static inline abi_ulong
1606 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1607 {
1608 unsigned long sp = regs->regs[13];
1609
1610 /*
1611 * This is the X/Open sanctioned signal stack switching.
1612 */
1613 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp))
1614 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1615 /*
1616 * ATPCS B01 mandates 8-byte alignment
1617 */
1618 return (sp - framesize) & ~7;
1619 }
1620
1621 static int
1622 setup_return(CPUARMState *env, struct target_sigaction *ka,
1623 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1624 {
1625 abi_ulong handler = ka->_sa_handler;
1626 abi_ulong retcode;
1627 int thumb = handler & 1;
1628 uint32_t cpsr = cpsr_read(env);
1629
1630 cpsr &= ~CPSR_IT;
1631 if (thumb) {
1632 cpsr |= CPSR_T;
1633 } else {
1634 cpsr &= ~CPSR_T;
1635 }
1636
1637 if (ka->sa_flags & TARGET_SA_RESTORER) {
1638 retcode = ka->sa_restorer;
1639 } else {
1640 unsigned int idx = thumb;
1641
1642 if (ka->sa_flags & TARGET_SA_SIGINFO)
1643 idx += 2;
1644
1645 if (__put_user(retcodes[idx], rc))
1646 return 1;
1647
1648 retcode = rc_addr + thumb;
1649 }
1650
1651 env->regs[0] = usig;
1652 env->regs[13] = frame_addr;
1653 env->regs[14] = retcode;
1654 env->regs[15] = handler & (thumb ? ~1 : ~3);
1655 cpsr_write(env, cpsr, 0xffffffff);
1656
1657 return 0;
1658 }
1659
1660 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1661 {
1662 int i;
1663 struct target_vfp_sigframe *vfpframe;
1664 vfpframe = (struct target_vfp_sigframe *)regspace;
1665 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1666 __put_user(sizeof(*vfpframe), &vfpframe->size);
1667 for (i = 0; i < 32; i++) {
1668 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1669 }
1670 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1671 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1672 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1673 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1674 return (abi_ulong*)(vfpframe+1);
1675 }
1676
1677 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1678 CPUARMState *env)
1679 {
1680 int i;
1681 struct target_iwmmxt_sigframe *iwmmxtframe;
1682 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1683 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1684 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1685 for (i = 0; i < 16; i++) {
1686 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1687 }
1688 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1689 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1690 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1691 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1692 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1693 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1694 return (abi_ulong*)(iwmmxtframe+1);
1695 }
1696
1697 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1698 target_sigset_t *set, CPUARMState *env)
1699 {
1700 struct target_sigaltstack stack;
1701 int i;
1702 abi_ulong *regspace;
1703
1704 /* Clear all the bits of the ucontext we don't use. */
1705 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1706
1707 memset(&stack, 0, sizeof(stack));
1708 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1709 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1710 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1711 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1712
1713 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1714 /* Save coprocessor signal frame. */
1715 regspace = uc->tuc_regspace;
1716 if (arm_feature(env, ARM_FEATURE_VFP)) {
1717 regspace = setup_sigframe_v2_vfp(regspace, env);
1718 }
1719 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1720 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1721 }
1722
1723 /* Write terminating magic word */
1724 __put_user(0, regspace);
1725
1726 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1727 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1728 }
1729 }
1730
1731 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1732 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1733 target_sigset_t *set, CPUARMState *regs)
1734 {
1735 struct sigframe_v1 *frame;
1736 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1737 int i;
1738
1739 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1740 return;
1741
1742 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1743
1744 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1745 if (__put_user(set->sig[i], &frame->extramask[i - 1]))
1746 goto end;
1747 }
1748
1749 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1750 frame_addr + offsetof(struct sigframe_v1, retcode));
1751
1752 end:
1753 unlock_user_struct(frame, frame_addr, 1);
1754 }
1755
1756 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1757 target_sigset_t *set, CPUARMState *regs)
1758 {
1759 struct sigframe_v2 *frame;
1760 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1761
1762 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1763 return;
1764
1765 setup_sigframe_v2(&frame->uc, set, regs);
1766
1767 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1768 frame_addr + offsetof(struct sigframe_v2, retcode));
1769
1770 unlock_user_struct(frame, frame_addr, 1);
1771 }
1772
1773 static void setup_frame(int usig, struct target_sigaction *ka,
1774 target_sigset_t *set, CPUARMState *regs)
1775 {
1776 if (get_osversion() >= 0x020612) {
1777 setup_frame_v2(usig, ka, set, regs);
1778 } else {
1779 setup_frame_v1(usig, ka, set, regs);
1780 }
1781 }
1782
1783 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1784 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1785 target_siginfo_t *info,
1786 target_sigset_t *set, CPUARMState *env)
1787 {
1788 struct rt_sigframe_v1 *frame;
1789 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1790 struct target_sigaltstack stack;
1791 int i;
1792 abi_ulong info_addr, uc_addr;
1793
1794 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1795 return /* 1 */;
1796
1797 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1798 __put_user(info_addr, &frame->pinfo);
1799 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1800 __put_user(uc_addr, &frame->puc);
1801 copy_siginfo_to_user(&frame->info, info);
1802
1803 /* Clear all the bits of the ucontext we don't use. */
1804 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1805
1806 memset(&stack, 0, sizeof(stack));
1807 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1808 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1809 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1810 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1811
1812 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1813 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1814 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
1815 goto end;
1816 }
1817
1818 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1819 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1820
1821 env->regs[1] = info_addr;
1822 env->regs[2] = uc_addr;
1823
1824 end:
1825 unlock_user_struct(frame, frame_addr, 1);
1826 }
1827
1828 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1829 target_siginfo_t *info,
1830 target_sigset_t *set, CPUARMState *env)
1831 {
1832 struct rt_sigframe_v2 *frame;
1833 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1834 abi_ulong info_addr, uc_addr;
1835
1836 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1837 return /* 1 */;
1838
1839 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1840 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1841 copy_siginfo_to_user(&frame->info, info);
1842
1843 setup_sigframe_v2(&frame->uc, set, env);
1844
1845 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1846 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1847
1848 env->regs[1] = info_addr;
1849 env->regs[2] = uc_addr;
1850
1851 unlock_user_struct(frame, frame_addr, 1);
1852 }
1853
1854 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1855 target_siginfo_t *info,
1856 target_sigset_t *set, CPUARMState *env)
1857 {
1858 if (get_osversion() >= 0x020612) {
1859 setup_rt_frame_v2(usig, ka, info, set, env);
1860 } else {
1861 setup_rt_frame_v1(usig, ka, info, set, env);
1862 }
1863 }
1864
1865 static int
1866 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1867 {
1868 int err = 0;
1869 uint32_t cpsr;
1870
1871 __get_user_error(env->regs[0], &sc->arm_r0, err);
1872 __get_user_error(env->regs[1], &sc->arm_r1, err);
1873 __get_user_error(env->regs[2], &sc->arm_r2, err);
1874 __get_user_error(env->regs[3], &sc->arm_r3, err);
1875 __get_user_error(env->regs[4], &sc->arm_r4, err);
1876 __get_user_error(env->regs[5], &sc->arm_r5, err);
1877 __get_user_error(env->regs[6], &sc->arm_r6, err);
1878 __get_user_error(env->regs[7], &sc->arm_r7, err);
1879 __get_user_error(env->regs[8], &sc->arm_r8, err);
1880 __get_user_error(env->regs[9], &sc->arm_r9, err);
1881 __get_user_error(env->regs[10], &sc->arm_r10, err);
1882 __get_user_error(env->regs[11], &sc->arm_fp, err);
1883 __get_user_error(env->regs[12], &sc->arm_ip, err);
1884 __get_user_error(env->regs[13], &sc->arm_sp, err);
1885 __get_user_error(env->regs[14], &sc->arm_lr, err);
1886 __get_user_error(env->regs[15], &sc->arm_pc, err);
1887 #ifdef TARGET_CONFIG_CPU_32
1888 __get_user_error(cpsr, &sc->arm_cpsr, err);
1889 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC);
1890 #endif
1891
1892 err |= !valid_user_regs(env);
1893
1894 return err;
1895 }
1896
1897 static long do_sigreturn_v1(CPUARMState *env)
1898 {
1899 abi_ulong frame_addr;
1900 struct sigframe_v1 *frame = NULL;
1901 target_sigset_t set;
1902 sigset_t host_set;
1903 int i;
1904
1905 /*
1906 * Since the signal frame was stacked on a 64-bit boundary,
1907 * 'sp' should still be 8-byte aligned here. If it's not,
1908 * the user is trying to mess with us.
1909 */
1910 frame_addr = env->regs[13];
1911 if (frame_addr & 7) {
1912 goto badframe;
1913 }
1914
1915 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1916 goto badframe;
1917
1918 if (__get_user(set.sig[0], &frame->sc.oldmask))
1919 goto badframe;
1920 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1921 if (__get_user(set.sig[i], &frame->extramask[i - 1]))
1922 goto badframe;
1923 }
1924
1925 target_to_host_sigset_internal(&host_set, &set);
1926 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1927
1928 if (restore_sigcontext(env, &frame->sc))
1929 goto badframe;
1930
1931 #if 0
1932 /* Send SIGTRAP if we're single-stepping */
1933 if (ptrace_cancel_bpt(current))
1934 send_sig(SIGTRAP, current, 1);
1935 #endif
1936 unlock_user_struct(frame, frame_addr, 0);
1937 return env->regs[0];
1938
1939 badframe:
1940 unlock_user_struct(frame, frame_addr, 0);
1941 force_sig(TARGET_SIGSEGV /* , current */);
1942 return 0;
1943 }
1944
1945 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1946 {
1947 int i;
1948 abi_ulong magic, sz;
1949 uint32_t fpscr, fpexc;
1950 struct target_vfp_sigframe *vfpframe;
1951 vfpframe = (struct target_vfp_sigframe *)regspace;
1952
1953 __get_user(magic, &vfpframe->magic);
1954 __get_user(sz, &vfpframe->size);
1955 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1956 return 0;
1957 }
1958 for (i = 0; i < 32; i++) {
1959 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1960 }
1961 __get_user(fpscr, &vfpframe->ufp.fpscr);
1962 vfp_set_fpscr(env, fpscr);
1963 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1964 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1965 * and the exception flag is cleared
1966 */
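/* In FPEXC, bit 30 is EN (VFP enable), bit 31 is EX (exception pending)
 * and bit 28 is FP2V (FPINST2 valid).
 */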
1967 fpexc |= (1 << 30);
1968 fpexc &= ~((1 << 31) | (1 << 28));
1969 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1970 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1971 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
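/* Return a pointer just past this record, so the caller can go on to
 * parse any further coprocessor frame in the regspace.
 */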
1972 return (abi_ulong*)(vfpframe + 1);
1973 }
1974
1975 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1976 abi_ulong *regspace)
1977 {
1978 int i;
1979 abi_ulong magic, sz;
1980 struct target_iwmmxt_sigframe *iwmmxtframe;
1981 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1982
1983 __get_user(magic, &iwmmxtframe->magic);
1984 __get_user(sz, &iwmmxtframe->size);
1985 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1986 return 0;
1987 }
1988 for (i = 0; i < 16; i++) {
1989 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1990 }
1991 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1992 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1993 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1994 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1995 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1996 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1997 return (abi_ulong*)(iwmmxtframe + 1);
1998 }
1999
2000 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
2001 struct target_ucontext_v2 *uc)
2002 {
2003 sigset_t host_set;
2004 abi_ulong *regspace;
2005
2006 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2007 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2008
2009 if (restore_sigcontext(env, &uc->tuc_mcontext))
2010 return 1;
2011
2012 /* Restore coprocessor signal frame */
2013 regspace = uc->tuc_regspace;
2014 if (arm_feature(env, ARM_FEATURE_VFP)) {
2015 regspace = restore_sigframe_v2_vfp(env, regspace);
2016 if (!regspace) {
2017 return 1;
2018 }
2019 }
2020 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2021 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2022 if (!regspace) {
2023 return 1;
2024 }
2025 }
2026
2027 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2028 return 1;
2029
2030 #if 0
2031 /* Send SIGTRAP if we're single-stepping */
2032 if (ptrace_cancel_bpt(current))
2033 send_sig(SIGTRAP, current, 1);
2034 #endif
2035
2036 return 0;
2037 }
2038
2039 static long do_sigreturn_v2(CPUARMState *env)
2040 {
2041 abi_ulong frame_addr;
2042 struct sigframe_v2 *frame = NULL;
2043
2044 /*
2045 * Since the signal frame was stacked on a 64-bit boundary,
2046 * 'sp' should still be 8-byte aligned here. If it's not,
2047 * the user is trying to mess with us.
2048 */
2049 frame_addr = env->regs[13];
2050 if (frame_addr & 7) {
2051 goto badframe;
2052 }
2053
2054 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2055 goto badframe;
2056
2057 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2058 goto badframe;
2059
2060 unlock_user_struct(frame, frame_addr, 0);
2061 return env->regs[0];
2062
2063 badframe:
2064 unlock_user_struct(frame, frame_addr, 0);
2065 force_sig(TARGET_SIGSEGV /* , current */);
2066 return 0;
2067 }
2068
2069 long do_sigreturn(CPUARMState *env)
2070 {
2071 if (get_osversion() >= 0x020612) {
2072 return do_sigreturn_v2(env);
2073 } else {
2074 return do_sigreturn_v1(env);
2075 }
2076 }
2077
2078 static long do_rt_sigreturn_v1(CPUARMState *env)
2079 {
2080 abi_ulong frame_addr;
2081 struct rt_sigframe_v1 *frame = NULL;
2082 sigset_t host_set;
2083
2084 /*
2085 * Since the signal frame was stacked on a 64-bit boundary,
2086 * 'sp' should still be 8-byte aligned here. If it's not,
2087 * the user is trying to mess with us.
2088 */
2089 frame_addr = env->regs[13];
2090 if (frame_addr & 7) {
2091 goto badframe;
2092 }
2093
2094 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2095 goto badframe;
2096
2097 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2098 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2099
2100 if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
2101 goto badframe;
2102
2103 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2104 goto badframe;
2105
2106 #if 0
2107 /* Send SIGTRAP if we're single-stepping */
2108 if (ptrace_cancel_bpt(current))
2109 send_sig(SIGTRAP, current, 1);
2110 #endif
2111 unlock_user_struct(frame, frame_addr, 0);
2112 return env->regs[0];
2113
2114 badframe:
2115 unlock_user_struct(frame, frame_addr, 0);
2116 force_sig(TARGET_SIGSEGV /* , current */);
2117 return 0;
2118 }
2119
2120 static long do_rt_sigreturn_v2(CPUARMState *env)
2121 {
2122 abi_ulong frame_addr;
2123 struct rt_sigframe_v2 *frame = NULL;
2124
2125 /*
2126 * Since the signal frame was stacked on a 64-bit boundary,
2127 * 'sp' should still be 8-byte aligned here. If it's not,
2128 * the user is trying to mess with us.
2129 */
2130 frame_addr = env->regs[13];
2131 if (frame_addr & 7) {
2132 goto badframe;
2133 }
2134
2135 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2136 goto badframe;
2137
2138 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2139 goto badframe;
2140
2141 unlock_user_struct(frame, frame_addr, 0);
2142 return env->regs[0];
2143
2144 badframe:
2145 unlock_user_struct(frame, frame_addr, 0);
2146 force_sig(TARGET_SIGSEGV /* , current */);
2147 return 0;
2148 }
2149
2150 long do_rt_sigreturn(CPUARMState *env)
2151 {
2152 if (get_osversion() >= 0x020612) {
2153 return do_rt_sigreturn_v2(env);
2154 } else {
2155 return do_rt_sigreturn_v1(env);
2156 }
2157 }
2158
2159 #elif defined(TARGET_SPARC)
2160
2161 #define __SUNOS_MAXWIN 31
2162
2163 /* This is what SunOS does, so shall I. */
2164 struct target_sigcontext {
2165 abi_ulong sigc_onstack; /* state to restore */
2166
2167 abi_ulong sigc_mask; /* sigmask to restore */
2168 abi_ulong sigc_sp; /* stack pointer */
2169 abi_ulong sigc_pc; /* program counter */
2170 abi_ulong sigc_npc; /* next program counter */
2171 abi_ulong sigc_psr; /* for condition codes etc */
2172 abi_ulong sigc_g1; /* User uses these two registers */
2173 abi_ulong sigc_o0; /* within the trampoline code. */
2174
2175 /* Now comes information regarding the user's window set
2176 * at the time of the signal.
2177 */
2178 abi_ulong sigc_oswins; /* outstanding windows */
2179
2180 /* stack ptrs for each regwin buf */
2181 char *sigc_spbuf[__SUNOS_MAXWIN];
2182
2183 /* Windows to restore after signal */
2184 struct {
2185 abi_ulong locals[8];
2186 abi_ulong ins[8];
2187 } sigc_wbuf[__SUNOS_MAXWIN];
2188 };
2189 /* A Sparc stack frame */
2190 struct sparc_stackf {
2191 abi_ulong locals[8];
2192 abi_ulong ins[8];
2193 /* It's simpler to treat fp and callers_pc as elements of ins[]
2194 * since we never need to access them ourselves.
2195 */
2196 char *structptr;
2197 abi_ulong xargs[6];
2198 abi_ulong xxargs[1];
2199 };
2200
2201 typedef struct {
2202 struct {
2203 abi_ulong psr;
2204 abi_ulong pc;
2205 abi_ulong npc;
2206 abi_ulong y;
2207 abi_ulong u_regs[16]; /* globals and ins */
2208 } si_regs;
2209 int si_mask;
2210 } __siginfo_t;
2211
2212 typedef struct {
2213 abi_ulong si_float_regs[32];
2214 unsigned long si_fsr;
2215 unsigned long si_fpqdepth;
2216 struct {
2217 unsigned long *insn_addr;
2218 unsigned long insn;
2219 } si_fpqueue [16];
2220 } qemu_siginfo_fpu_t;
2221
2222
2223 struct target_signal_frame {
2224 struct sparc_stackf ss;
2225 __siginfo_t info;
2226 abi_ulong fpu_save;
2227 abi_ulong insns[2] __attribute__ ((aligned (8)));
2228 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2229 abi_ulong extra_size; /* Should be 0 */
2230 qemu_siginfo_fpu_t fpu_state;
2231 };
2232 struct target_rt_signal_frame {
2233 struct sparc_stackf ss;
2234 siginfo_t info;
2235 abi_ulong regs[20];
2236 sigset_t mask;
2237 abi_ulong fpu_save;
2238 unsigned int insns[2];
2239 stack_t stack;
2240 unsigned int extra_size; /* Should be 0 */
2241 qemu_siginfo_fpu_t fpu_state;
2242 };
2243
2244 #define UREG_O0 16
2245 #define UREG_O6 22
2246 #define UREG_I0 0
2247 #define UREG_I1 1
2248 #define UREG_I2 2
2249 #define UREG_I3 3
2250 #define UREG_I4 4
2251 #define UREG_I5 5
2252 #define UREG_I6 6
2253 #define UREG_I7 7
2254 #define UREG_L0 8
2255 #define UREG_FP UREG_I6
2256 #define UREG_SP UREG_O6
2257
2258 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2259 CPUSPARCState *env,
2260 unsigned long framesize)
2261 {
2262 abi_ulong sp;
2263
2264 sp = env->regwptr[UREG_FP];
2265
2266 /* This is the X/Open sanctioned signal stack switching. */
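/* Only switch if we are not already on the alternate stack and the top
 * of that stack is 8-byte aligned; otherwise stay where we are.
 */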
2267 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2268 if (!on_sig_stack(sp)
2269 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7))
2270 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2271 }
2272 return sp - framesize;
2273 }
2274
2275 static int
2276 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2277 {
2278 int err = 0, i;
2279
2280 err |= __put_user(env->psr, &si->si_regs.psr);
2281 err |= __put_user(env->pc, &si->si_regs.pc);
2282 err |= __put_user(env->npc, &si->si_regs.npc);
2283 err |= __put_user(env->y, &si->si_regs.y);
2284 for (i=0; i < 8; i++) {
2285 err |= __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2286 }
2287 for (i=0; i < 8; i++) {
2288 err |= __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2289 }
2290 err |= __put_user(mask, &si->si_mask);
2291 return err;
2292 }
2293
2294 #if 0
2295 static int
2296 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2297 CPUSPARCState *env, unsigned long mask)
2298 {
2299 int err = 0;
2300
2301 err |= __put_user(mask, &sc->sigc_mask);
2302 err |= __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2303 err |= __put_user(env->pc, &sc->sigc_pc);
2304 err |= __put_user(env->npc, &sc->sigc_npc);
2305 err |= __put_user(env->psr, &sc->sigc_psr);
2306 err |= __put_user(env->gregs[1], &sc->sigc_g1);
2307 err |= __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2308
2309 return err;
2310 }
2311 #endif
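/* Round the signal frame size up to a multiple of 8 bytes. */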
2312 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2313
2314 static void setup_frame(int sig, struct target_sigaction *ka,
2315 target_sigset_t *set, CPUSPARCState *env)
2316 {
2317 abi_ulong sf_addr;
2318 struct target_signal_frame *sf;
2319 int sigframe_size, err, i;
2320
2321 /* 1. Make sure everything is clean */
2322 //synchronize_user_stack();
2323
2324 sigframe_size = NF_ALIGNEDSZ;
2325 sf_addr = get_sigframe(ka, env, sigframe_size);
2326
2327 sf = lock_user(VERIFY_WRITE, sf_addr,
2328 sizeof(struct target_signal_frame), 0);
2329 if (!sf)
2330 goto sigsegv;
2331
2332 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2333 #if 0
2334 if (invalid_frame_pointer(sf, sigframe_size))
2335 goto sigill_and_return;
2336 #endif
2337 /* 2. Save the current process state */
2338 err = setup___siginfo(&sf->info, env, set->sig[0]);
2339 err |= __put_user(0, &sf->extra_size);
2340
2341 //err |= save_fpu_state(regs, &sf->fpu_state);
2342 //err |= __put_user(&sf->fpu_state, &sf->fpu_save);
2343
2344 err |= __put_user(set->sig[0], &sf->info.si_mask);
2345 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2346 err |= __put_user(set->sig[i + 1], &sf->extramask[i]);
2347 }
2348
2349 for (i = 0; i < 8; i++) {
2350 err |= __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2351 }
2352 for (i = 0; i < 8; i++) {
2353 err |= __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2354 }
2355 if (err)
2356 goto sigsegv;
2357
2358 /* 3. signal handler back-trampoline and parameters */
2359 env->regwptr[UREG_FP] = sf_addr;
2360 env->regwptr[UREG_I0] = sig;
2361 env->regwptr[UREG_I1] = sf_addr +
2362 offsetof(struct target_signal_frame, info);
2363 env->regwptr[UREG_I2] = sf_addr +
2364 offsetof(struct target_signal_frame, info);
2365
2366 /* 4. signal handler */
2367 env->pc = ka->_sa_handler;
2368 env->npc = (env->pc + 4);
2369 /* 5. return to kernel instructions */
2370 if (ka->sa_restorer)
2371 env->regwptr[UREG_I7] = ka->sa_restorer;
2372 else {
2373 uint32_t val32;
2374
2375 env->regwptr[UREG_I7] = sf_addr +
2376 offsetof(struct target_signal_frame, insns) - 2 * 4;
2377
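/* The two words below are the raw encodings of the trampoline:
 * 0x821020d8 is "or %g0, 0xd8, %g1" (i.e. the mov of __NR_sigreturn
 * into %g1) and 0x91d02010 is "ta 0x10", the Linux syscall trap.
 */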
2378 /* mov __NR_sigreturn, %g1 */
2379 val32 = 0x821020d8;
2380 err |= __put_user(val32, &sf->insns[0]);
2381
2382 /* t 0x10 */
2383 val32 = 0x91d02010;
2384 err |= __put_user(val32, &sf->insns[1]);
2385 if (err)
2386 goto sigsegv;
2387
2388 /* Flush instruction space. */
2389 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2390 // tb_flush(env);
2391 }
2392 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2393 return;
2394 #if 0
2395 sigill_and_return:
2396 force_sig(TARGET_SIGILL);
2397 #endif
2398 sigsegv:
2399 //fprintf(stderr, "force_sig\n");
2400 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2401 force_sig(TARGET_SIGSEGV);
2402 }
2403 static inline int
2404 restore_fpu_state(CPUSPARCState *env, qemu_siginfo_fpu_t *fpu)
2405 {
2406 int err;
2407 #if 0
2408 #ifdef CONFIG_SMP
2409 if (current->flags & PF_USEDFPU)
2410 regs->psr &= ~PSR_EF;
2411 #else
2412 if (current == last_task_used_math) {
2413 last_task_used_math = 0;
2414 regs->psr &= ~PSR_EF;
2415 }
2416 #endif
2417 current->used_math = 1;
2418 current->flags &= ~PF_USEDFPU;
2419 #endif
2420 #if 0
2421 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu)))
2422 return -EFAULT;
2423 #endif
2424
2425 /* XXX: incorrect */
2426 err = copy_from_user(&env->fpr[0], fpu->si_float_regs[0],
2427 (sizeof(abi_ulong) * 32));
2428 err |= __get_user(env->fsr, &fpu->si_fsr);
2429 #if 0
2430 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
2431 if (current->thread.fpqdepth != 0)
2432 err |= __copy_from_user(&current->thread.fpqueue[0],
2433 &fpu->si_fpqueue[0],
2434 ((sizeof(unsigned long) +
2435 (sizeof(unsigned long *)))*16));
2436 #endif
2437 return err;
2438 }
2439
2440
2441 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2442 target_siginfo_t *info,
2443 target_sigset_t *set, CPUSPARCState *env)
2444 {
2445 fprintf(stderr, "setup_rt_frame: not implemented\n");
2446 }
2447
2448 long do_sigreturn(CPUSPARCState *env)
2449 {
2450 abi_ulong sf_addr;
2451 struct target_signal_frame *sf;
2452 uint32_t up_psr, pc, npc;
2453 target_sigset_t set;
2454 sigset_t host_set;
2455 int err, i;
2456
2457 sf_addr = env->regwptr[UREG_FP];
2458 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1))
2459 goto segv_and_exit;
2460 #if 0
2461 fprintf(stderr, "sigreturn\n");
2462 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2463 #endif
2464 //cpu_dump_state(env, stderr, fprintf, 0);
2465
2466 /* 1. Make sure we are not getting garbage from the user */
2467
2468 if (sf_addr & 3)
2469 goto segv_and_exit;
2470
2471 err = __get_user(pc, &sf->info.si_regs.pc);
2472 err |= __get_user(npc, &sf->info.si_regs.npc);
2473
2474 if ((pc | npc) & 3)
2475 goto segv_and_exit;
2476
2477 /* 2. Restore the state */
2478 err |= __get_user(up_psr, &sf->info.si_regs.psr);
2479
2480 /* User can only change condition codes and FPU enabling in %psr. */
2481 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2482 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2483
2484 env->pc = pc;
2485 env->npc = npc;
2486 err |= __get_user(env->y, &sf->info.si_regs.y);
2487 for (i=0; i < 8; i++) {
2488 err |= __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2489 }
2490 for (i=0; i < 8; i++) {
2491 err |= __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2492 }
2493
2494 /* FIXME: implement FPU save/restore:
2495 * __get_user(fpu_save, &sf->fpu_save);
2496 * if (fpu_save)
2497 * err |= restore_fpu_state(env, fpu_save);
2498 */
2499
2500 /* This is pretty much atomic; no amount of locking would prevent
2501 * the races which exist anyway.
2502 */
2503 err |= __get_user(set.sig[0], &sf->info.si_mask);
2504 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2505 err |= (__get_user(set.sig[i], &sf->extramask[i - 1]));
2506 }
2507
2508 target_to_host_sigset_internal(&host_set, &set);
2509 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2510
2511 if (err)
2512 goto segv_and_exit;
2513 unlock_user_struct(sf, sf_addr, 0);
2514 return env->regwptr[0];
2515
2516 segv_and_exit:
2517 unlock_user_struct(sf, sf_addr, 0);
2518 force_sig(TARGET_SIGSEGV);
2519 }
2520
2521 long do_rt_sigreturn(CPUSPARCState *env)
2522 {
2523 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2524 return -TARGET_ENOSYS;
2525 }
2526
2527 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2528 #define MC_TSTATE 0
2529 #define MC_PC 1
2530 #define MC_NPC 2
2531 #define MC_Y 3
2532 #define MC_G1 4
2533 #define MC_G2 5
2534 #define MC_G3 6
2535 #define MC_G4 7
2536 #define MC_G5 8
2537 #define MC_G6 9
2538 #define MC_G7 10
2539 #define MC_O0 11
2540 #define MC_O1 12
2541 #define MC_O2 13
2542 #define MC_O3 14
2543 #define MC_O4 15
2544 #define MC_O5 16
2545 #define MC_O6 17
2546 #define MC_O7 18
2547 #define MC_NGREG 19
2548
2549 typedef abi_ulong target_mc_greg_t;
2550 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2551
2552 struct target_mc_fq {
2553 abi_ulong *mcfq_addr;
2554 uint32_t mcfq_insn;
2555 };
2556
2557 struct target_mc_fpu {
2558 union {
2559 uint32_t sregs[32];
2560 uint64_t dregs[32];
2561 //uint128_t qregs[16];
2562 } mcfpu_fregs;
2563 abi_ulong mcfpu_fsr;
2564 abi_ulong mcfpu_fprs;
2565 abi_ulong mcfpu_gsr;
2566 struct target_mc_fq *mcfpu_fq;
2567 unsigned char mcfpu_qcnt;
2568 unsigned char mcfpu_qentsz;
2569 unsigned char mcfpu_enab;
2570 };
2571 typedef struct target_mc_fpu target_mc_fpu_t;
2572
2573 typedef struct {
2574 target_mc_gregset_t mc_gregs;
2575 target_mc_greg_t mc_fp;
2576 target_mc_greg_t mc_i7;
2577 target_mc_fpu_t mc_fpregs;
2578 } target_mcontext_t;
2579
2580 struct target_ucontext {
2581 struct target_ucontext *tuc_link;
2582 abi_ulong tuc_flags;
2583 target_sigset_t tuc_sigmask;
2584 target_mcontext_t tuc_mcontext;
2585 };
2586
2587 /* A V9 register window */
2588 struct target_reg_window {
2589 abi_ulong locals[8];
2590 abi_ulong ins[8];
2591 };
2592
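/* The 64-bit SPARC ABI keeps %sp and %fp biased by 2047 bytes; the bias
 * must be added back before dereferencing the register window save area
 * (see the w_addr computations below).
 */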
2593 #define TARGET_STACK_BIAS 2047
2594
2595 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2596 void sparc64_set_context(CPUSPARCState *env)
2597 {
2598 abi_ulong ucp_addr;
2599 struct target_ucontext *ucp;
2600 target_mc_gregset_t *grp;
2601 abi_ulong pc, npc, tstate;
2602 abi_ulong fp, i7, w_addr;
2603 int err;
2604 unsigned int i;
2605
2606 ucp_addr = env->regwptr[UREG_I0];
2607 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1))
2608 goto do_sigsegv;
2609 grp = &ucp->tuc_mcontext.mc_gregs;
2610 err = __get_user(pc, &((*grp)[MC_PC]));
2611 err |= __get_user(npc, &((*grp)[MC_NPC]));
2612 if (err || ((pc | npc) & 3))
2613 goto do_sigsegv;
2614 if (env->regwptr[UREG_I1]) {
2615 target_sigset_t target_set;
2616 sigset_t set;
2617
2618 if (TARGET_NSIG_WORDS == 1) {
2619 if (__get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]))
2620 goto do_sigsegv;
2621 } else {
2622 abi_ulong *src, *dst;
2623 src = ucp->tuc_sigmask.sig;
2624 dst = target_set.sig;
2625 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2626 err |= __get_user(*dst, src);
2627 }
2628 if (err)
2629 goto do_sigsegv;
2630 }
2631 target_to_host_sigset_internal(&set, &target_set);
2632 do_sigprocmask(SIG_SETMASK, &set, NULL);
2633 }
2634 env->pc = pc;
2635 env->npc = npc;
2636 err |= __get_user(env->y, &((*grp)[MC_Y]));
2637 err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
2638 env->asi = (tstate >> 24) & 0xff;
2639 cpu_put_ccr(env, tstate >> 32);
2640 cpu_put_cwp64(env, tstate & 0x1f);
2641 err |= __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2642 err |= __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2643 err |= __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2644 err |= __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2645 err |= __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2646 err |= __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2647 err |= __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2648 err |= __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2649 err |= __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2650 err |= __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2651 err |= __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2652 err |= __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2653 err |= __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2654 err |= __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2655 err |= __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2656
2657 err |= __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2658 err |= __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2659
2660 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2661 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2662 abi_ulong) != 0)
2663 goto do_sigsegv;
2664 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2665 abi_ulong) != 0)
2666 goto do_sigsegv;
2667 /* FIXME this does not match how the kernel handles the FPU in
2668 * its sparc64_set_context implementation. In particular the FPU
2669 * is only restored if fenab is non-zero in:
2670 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2671 */
2672 err |= __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2673 {
2674 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2675 for (i = 0; i < 64; i++, src++) {
2676 if (i & 1) {
2677 err |= __get_user(env->fpr[i/2].l.lower, src);
2678 } else {
2679 err |= __get_user(env->fpr[i/2].l.upper, src);
2680 }
2681 }
2682 }
2683 err |= __get_user(env->fsr,
2684 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2685 err |= __get_user(env->gsr,
2686 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2687 if (err)
2688 goto do_sigsegv;
2689 unlock_user_struct(ucp, ucp_addr, 0);
2690 return;
2691 do_sigsegv:
2692 unlock_user_struct(ucp, ucp_addr, 0);
2693 force_sig(TARGET_SIGSEGV);
2694 }
2695
2696 void sparc64_get_context(CPUSPARCState *env)
2697 {
2698 abi_ulong ucp_addr;
2699 struct target_ucontext *ucp;
2700 target_mc_gregset_t *grp;
2701 target_mcontext_t *mcp;
2702 abi_ulong fp, i7, w_addr;
2703 int err;
2704 unsigned int i;
2705 target_sigset_t target_set;
2706 sigset_t set;
2707
2708 ucp_addr = env->regwptr[UREG_I0];
2709 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
2710 goto do_sigsegv;
2711
2712 mcp = &ucp->tuc_mcontext;
2713 grp = &mcp->mc_gregs;
2714
2715 /* Skip over the trap instruction, first. */
2716 env->pc = env->npc;
2717 env->npc += 4;
2718
2719 err = 0;
2720
2721 do_sigprocmask(0, NULL, &set);
2722 host_to_target_sigset_internal(&target_set, &set);
2723 if (TARGET_NSIG_WORDS == 1) {
2724 err |= __put_user(target_set.sig[0],
2725 (abi_ulong *)&ucp->tuc_sigmask);
2726 } else {
2727 abi_ulong *src, *dst;
2728 src = target_set.sig;
2729 dst = ucp->tuc_sigmask.sig;
2730 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2731 err |= __put_user(*src, dst);
2732 }
2733 if (err)
2734 goto do_sigsegv;
2735 }
2736
2737 /* XXX: tstate must be saved properly */
2738 // err |= __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2739 err |= __put_user(env->pc, &((*grp)[MC_PC]));
2740 err |= __put_user(env->npc, &((*grp)[MC_NPC]));
2741 err |= __put_user(env->y, &((*grp)[MC_Y]));
2742 err |= __put_user(env->gregs[1], &((*grp)[MC_G1]));
2743 err |= __put_user(env->gregs[2], &((*grp)[MC_G2]));
2744 err |= __put_user(env->gregs[3], &((*grp)[MC_G3]));
2745 err |= __put_user(env->gregs[4], &((*grp)[MC_G4]));
2746 err |= __put_user(env->gregs[5], &((*grp)[MC_G5]));
2747 err |= __put_user(env->gregs[6], &((*grp)[MC_G6]));
2748 err |= __put_user(env->gregs[7], &((*grp)[MC_G7]));
2749 err |= __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2750 err |= __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2751 err |= __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2752 err |= __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2753 err |= __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2754 err |= __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2755 err |= __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2756 err |= __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2757
2758 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2759 fp = i7 = 0;
2760 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2761 abi_ulong) != 0)
2762 goto do_sigsegv;
2763 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2764 abi_ulong) != 0)
2765 goto do_sigsegv;
2766 err |= __put_user(fp, &(mcp->mc_fp));
2767 err |= __put_user(i7, &(mcp->mc_i7));
2768
2769 {
2770 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2771 for (i = 0; i < 64; i++, dst++) {
2772 if (i & 1) {
2773 err |= __put_user(env->fpr[i/2].l.lower, dst);
2774 } else {
2775 err |= __put_user(env->fpr[i/2].l.upper, dst);
2776 }
2777 }
2778 }
2779 err |= __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2780 err |= __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2781 err |= __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2782
2783 if (err)
2784 goto do_sigsegv;
2785 unlock_user_struct(ucp, ucp_addr, 1);
2786 return;
2787 do_sigsegv:
2788 unlock_user_struct(ucp, ucp_addr, 1);
2789 force_sig(TARGET_SIGSEGV);
2790 }
2791 #endif
2792 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2793
2794 # if defined(TARGET_ABI_MIPSO32)
2795 struct target_sigcontext {
2796 uint32_t sc_regmask; /* Unused */
2797 uint32_t sc_status;
2798 uint64_t sc_pc;
2799 uint64_t sc_regs[32];
2800 uint64_t sc_fpregs[32];
2801 uint32_t sc_ownedfp; /* Unused */
2802 uint32_t sc_fpc_csr;
2803 uint32_t sc_fpc_eir; /* Unused */
2804 uint32_t sc_used_math;
2805 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2806 uint32_t pad0;
2807 uint64_t sc_mdhi;
2808 uint64_t sc_mdlo;
2809 target_ulong sc_hi1; /* Was sc_cause */
2810 target_ulong sc_lo1; /* Was sc_badvaddr */
2811 target_ulong sc_hi2; /* Was sc_sigset[4] */
2812 target_ulong sc_lo2;
2813 target_ulong sc_hi3;
2814 target_ulong sc_lo3;
2815 };
2816 # else /* N32 || N64 */
2817 struct target_sigcontext {
2818 uint64_t sc_regs[32];
2819 uint64_t sc_fpregs[32];
2820 uint64_t sc_mdhi;
2821 uint64_t sc_hi1;
2822 uint64_t sc_hi2;
2823 uint64_t sc_hi3;
2824 uint64_t sc_mdlo;
2825 uint64_t sc_lo1;
2826 uint64_t sc_lo2;
2827 uint64_t sc_lo3;
2828 uint64_t sc_pc;
2829 uint32_t sc_fpc_csr;
2830 uint32_t sc_used_math;
2831 uint32_t sc_dsp;
2832 uint32_t sc_reserved;
2833 };
2834 # endif /* O32 */
2835
2836 struct sigframe {
2837 uint32_t sf_ass[4]; /* argument save space for o32 */
2838 uint32_t sf_code[2]; /* signal trampoline */
2839 struct target_sigcontext sf_sc;
2840 target_sigset_t sf_mask;
2841 };
2842
2843 struct target_ucontext {
2844 target_ulong tuc_flags;
2845 target_ulong tuc_link;
2846 target_stack_t tuc_stack;
2847 target_ulong pad0;
2848 struct target_sigcontext tuc_mcontext;
2849 target_sigset_t tuc_sigmask;
2850 };
2851
2852 struct target_rt_sigframe {
2853 uint32_t rs_ass[4]; /* argument save space for o32 */
2854 uint32_t rs_code[2]; /* signal trampoline */
2855 struct target_siginfo rs_info;
2856 struct target_ucontext rs_uc;
2857 };
2858
2859 /* Install trampoline to jump back from signal handler */
2860 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2861 {
2862 int err = 0;
2863
2864 /*
2865 * Set up the return code ...
2866 *
2867 * li v0, __NR__foo_sigreturn
2868 * syscall
2869 */
2870
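/* 0x24020000 is the encoding of "addiu $v0, $zero, <nr>" (the "li"
 * above) and 0x0000000c is the "syscall" instruction.
 */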
2871 err |= __put_user(0x24020000 + syscall, tramp + 0);
2872 err |= __put_user(0x0000000c, tramp + 1);
2873 return err;
2874 }
2875
2876 static inline int
2877 setup_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2878 {
2879 int err = 0;
2880 int i;
2881
2882 err |= __put_user(exception_resume_pc(regs), &sc->sc_pc);
2883 regs->hflags &= ~MIPS_HFLAG_BMASK;
2884
2885 __put_user(0, &sc->sc_regs[0]);
2886 for (i = 1; i < 32; ++i) {
2887 err |= __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2888 }
2889
2890 err |= __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2891 err |= __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2892
2893 /* Rather than checking for dsp existence, always copy. The storage
2894 would just be garbage otherwise. */
2895 err |= __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2896 err |= __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2897 err |= __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2898 err |= __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2899 err |= __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2900 err |= __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2901 {
2902 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2903 err |= __put_user(dsp, &sc->sc_dsp);
2904 }
2905
2906 err |= __put_user(1, &sc->sc_used_math);
2907
2908 for (i = 0; i < 32; ++i) {
2909 err |= __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2910 }
2911
2912 return err;
2913 }
2914
2915 static inline int
2916 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2917 {
2918 int err = 0;
2919 int i;
2920
2921 err |= __get_user(regs->CP0_EPC, &sc->sc_pc);
2922
2923 err |= __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2924 err |= __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2925
2926 for (i = 1; i < 32; ++i) {
2927 err |= __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2928 }
2929
2930 err |= __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2931 err |= __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2932 err |= __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2933 err |= __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2934 err |= __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2935 err |= __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2936 {
2937 uint32_t dsp;
2938 err |= __get_user(dsp, &sc->sc_dsp);
2939 cpu_wrdsp(dsp, 0x3ff, regs);
2940 }
2941
2942 for (i = 0; i < 32; ++i) {
2943 err |= __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2944 }
2945
2946 return err;
2947 }
2948
2949 /*
2950 * Determine which stack to use.
2951 */
2952 static inline abi_ulong
2953 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2954 {
2955 unsigned long sp;
2956
2957 /* Default to using normal stack */
2958 sp = regs->active_tc.gpr[29];
2959
2960 /*
2961 * FPU emulator may have its own trampoline active just
2962 * above the user stack, 16 bytes before the next lowest
2963 * 16-byte boundary. Try to avoid trashing it.
2964 */
2965 sp -= 32;
2966
2967 /* This is the X/Open sanctioned signal stack switching. */
2968 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2969 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2970 }
2971
2972 return (sp - frame_size) & ~7;
2973 }
2974
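/* Bit 0 of the PC selects the compressed (MIPS16 / microMIPS) instruction
 * set; fold it into hflags and strip it from the architectural PC, much as
 * the hardware does on an ISA-mode-changing jump.
 */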
2975 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2976 {
2977 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2978 env->hflags &= ~MIPS_HFLAG_M16;
2979 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2980 env->active_tc.PC &= ~(target_ulong) 1;
2981 }
2982 }
2983
2984 # if defined(TARGET_ABI_MIPSO32)
2985 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2986 static void setup_frame(int sig, struct target_sigaction * ka,
2987 target_sigset_t *set, CPUMIPSState *regs)
2988 {
2989 struct sigframe *frame;
2990 abi_ulong frame_addr;
2991 int i;
2992
2993 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2994 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
2995 goto give_sigsegv;
2996
2997 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2998
2999 if(setup_sigcontext(regs, &frame->sf_sc))
3000 goto give_sigsegv;
3001
3002 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3003 if(__put_user(set->sig[i], &frame->sf_mask.sig[i]))
3004 goto give_sigsegv;
3005 }
3006
3007 /*
3008 * Arguments to signal handler:
3009 *
3010 * a0 = signal number
3011 * a1 = 0 (should be cause)
3012 * a2 = pointer to struct sigcontext
3013 *
3014 * $25 and PC point to the signal handler, $29 points to the
3015 * struct sigframe.
3016 */
3017 regs->active_tc.gpr[ 4] = sig;
3018 regs->active_tc.gpr[ 5] = 0;
3019 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3020 regs->active_tc.gpr[29] = frame_addr;
3021 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3022 /* The original kernel code sets CP0_EPC to the handler,
3023 * since it returns to userland using eret. We cannot do
3024 * that here, so we must set PC directly. */
3025 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3026 mips_set_hflags_isa_mode_from_pc(regs);
3027 unlock_user_struct(frame, frame_addr, 1);
3028 return;
3029
3030 give_sigsegv:
3031 unlock_user_struct(frame, frame_addr, 1);
3032 force_sig(TARGET_SIGSEGV/*, current*/);
3033 }
3034
3035 long do_sigreturn(CPUMIPSState *regs)
3036 {
3037 struct sigframe *frame;
3038 abi_ulong frame_addr;
3039 sigset_t blocked;
3040 target_sigset_t target_set;
3041 int i;
3042
3043 #if defined(DEBUG_SIGNAL)
3044 fprintf(stderr, "do_sigreturn\n");
3045 #endif
3046 frame_addr = regs->active_tc.gpr[29];
3047 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3048 goto badframe;
3049
3050 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3051 if(__get_user(target_set.sig[i], &frame->sf_mask.sig[i]))
3052 goto badframe;
3053 }
3054
3055 target_to_host_sigset_internal(&blocked, &target_set);
3056 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3057
3058 if (restore_sigcontext(regs, &frame->sf_sc))
3059 goto badframe;
3060
3061 #if 0
3062 /*
3063 * Don't let your children do this ...
3064 */
3065 __asm__ __volatile__(
3066 "move\t$29, %0\n\t"
3067 "j\tsyscall_exit"
3068 :/* no outputs */
3069 :"r" (&regs));
3070 /* Unreached */
3071 #endif
3072
3073 regs->active_tc.PC = regs->CP0_EPC;
3074 mips_set_hflags_isa_mode_from_pc(regs);
3075 /* I am not sure this is right, but it seems to work;
3076 * maybe a problem with nested signals? */
3077 regs->CP0_EPC = 0;
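/* -TARGET_QEMU_ESIGRETURN is a QEMU-internal code telling the syscall
 * loop not to store a return value, since the guest registers were just
 * reloaded from the signal frame.
 */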
3078 return -TARGET_QEMU_ESIGRETURN;
3079
3080 badframe:
3081 force_sig(TARGET_SIGSEGV/*, current*/);
3082 return 0;
3083 }
3084 # endif /* O32 */
3085
3086 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3087 target_siginfo_t *info,
3088 target_sigset_t *set, CPUMIPSState *env)
3089 {
3090 struct target_rt_sigframe *frame;
3091 abi_ulong frame_addr;
3092 int i;
3093
3094 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3095 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3096 goto give_sigsegv;
3097
3098 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3099
3100 copy_siginfo_to_user(&frame->rs_info, info);
3101
3102 __put_user(0, &frame->rs_uc.tuc_flags);
3103 __put_user(0, &frame->rs_uc.tuc_link);
3104 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3105 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3106 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3107 &frame->rs_uc.tuc_stack.ss_flags);
3108
3109 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3110
3111 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3112 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3113 }
3114
3115 /*
3116 * Arguments to signal handler:
3117 *
3118 * a0 = signal number
3119 * a1 = pointer to siginfo_t
3120 * a2 = pointer to struct ucontext
3121 *
3122 * $25 and PC point to the signal handler, $29 points to the
3123 * struct sigframe.
3124 */
3125 env->active_tc.gpr[ 4] = sig;
3126 env->active_tc.gpr[ 5] = frame_addr
3127 + offsetof(struct target_rt_sigframe, rs_info);
3128 env->active_tc.gpr[ 6] = frame_addr
3129 + offsetof(struct target_rt_sigframe, rs_uc);
3130 env->active_tc.gpr[29] = frame_addr;
3131 env->active_tc.gpr[31] = frame_addr
3132 + offsetof(struct target_rt_sigframe, rs_code);
3133 /* The original kernel code sets CP0_EPC to the handler,
3134 * since it returns to userland using eret. We cannot do
3135 * that here, so we must set PC directly. */
3136 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3137 mips_set_hflags_isa_mode_from_pc(env);
3138 unlock_user_struct(frame, frame_addr, 1);
3139 return;
3140
3141 give_sigsegv:
3142 unlock_user_struct(frame, frame_addr, 1);
3143 force_sig(TARGET_SIGSEGV/*, current*/);
3144 }
3145
3146 long do_rt_sigreturn(CPUMIPSState *env)
3147 {
3148 struct target_rt_sigframe *frame;
3149 abi_ulong frame_addr;
3150 sigset_t blocked;
3151
3152 #if defined(DEBUG_SIGNAL)
3153 fprintf(stderr, "do_rt_sigreturn\n");
3154 #endif
3155 frame_addr = env->active_tc.gpr[29];
3156 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3157 goto badframe;
3158
3159 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3160 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3161
3162 if (restore_sigcontext(env, &frame->rs_uc.tuc_mcontext))
3163 goto badframe;
3164
3165 if (do_sigaltstack(frame_addr +
3166 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3167 0, get_sp_from_cpustate(env)) == -EFAULT)
3168 goto badframe;
3169
3170 env->active_tc.PC = env->CP0_EPC;
3171 mips_set_hflags_isa_mode_from_pc(env);
3172 /* I am not sure this is right, but it seems to work;
3173 * maybe a problem with nested signals? */
3174 env->CP0_EPC = 0;
3175 return -TARGET_QEMU_ESIGRETURN;
3176
3177 badframe:
3178 force_sig(TARGET_SIGSEGV/*, current*/);
3179 return 0;
3180 }
3181
3182 #elif defined(TARGET_SH4)
3183
3184 /*
3185 * code and data structures from linux kernel:
3186 * include/asm-sh/sigcontext.h
3187 * arch/sh/kernel/signal.c
3188 */
3189
3190 struct target_sigcontext {
3191 target_ulong oldmask;
3192
3193 /* CPU registers */
3194 target_ulong sc_gregs[16];
3195 target_ulong sc_pc;
3196 target_ulong sc_pr;
3197 target_ulong sc_sr;
3198 target_ulong sc_gbr;
3199 target_ulong sc_mach;
3200 target_ulong sc_macl;
3201
3202 /* FPU registers */
3203 target_ulong sc_fpregs[16];
3204 target_ulong sc_xfpregs[16];
3205 unsigned int sc_fpscr;
3206 unsigned int sc_fpul;
3207 unsigned int sc_ownedfp;
3208 };
3209
3210 struct target_sigframe
3211 {
3212 struct target_sigcontext sc;
3213 target_ulong extramask[TARGET_NSIG_WORDS-1];
3214 uint16_t retcode[3];
3215 };
3216
3217
3218 struct target_ucontext {
3219 target_ulong tuc_flags;
3220 struct target_ucontext *tuc_link;
3221 target_stack_t tuc_stack;
3222 struct target_sigcontext tuc_mcontext;
3223 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3224 };
3225
3226 struct target_rt_sigframe
3227 {