signal/sparc64_set_context: remove __get_user checks
[qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <stdarg.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <assert.h>
26 #include <sys/ucontext.h>
27 #include <sys/resource.h>
28
29 #include "qemu.h"
30 #include "qemu-common.h"
31 #include "target_signal.h"
32
33 //#define DEBUG_SIGNAL
34
35 static struct target_sigaltstack target_sigaltstack_used = {
36 .ss_sp = 0,
37 .ss_size = 0,
38 .ss_flags = TARGET_SS_DISABLE,
39 };
40
41 static struct target_sigaction sigact_table[TARGET_NSIG];
42
43 static void host_signal_handler(int host_signum, siginfo_t *info,
44 void *puc);
45
46 static uint8_t host_to_target_signal_table[_NSIG] = {
47 [SIGHUP] = TARGET_SIGHUP,
48 [SIGINT] = TARGET_SIGINT,
49 [SIGQUIT] = TARGET_SIGQUIT,
50 [SIGILL] = TARGET_SIGILL,
51 [SIGTRAP] = TARGET_SIGTRAP,
52 [SIGABRT] = TARGET_SIGABRT,
53 /* [SIGIOT] = TARGET_SIGIOT,*/
54 [SIGBUS] = TARGET_SIGBUS,
55 [SIGFPE] = TARGET_SIGFPE,
56 [SIGKILL] = TARGET_SIGKILL,
57 [SIGUSR1] = TARGET_SIGUSR1,
58 [SIGSEGV] = TARGET_SIGSEGV,
59 [SIGUSR2] = TARGET_SIGUSR2,
60 [SIGPIPE] = TARGET_SIGPIPE,
61 [SIGALRM] = TARGET_SIGALRM,
62 [SIGTERM] = TARGET_SIGTERM,
63 #ifdef SIGSTKFLT
64 [SIGSTKFLT] = TARGET_SIGSTKFLT,
65 #endif
66 [SIGCHLD] = TARGET_SIGCHLD,
67 [SIGCONT] = TARGET_SIGCONT,
68 [SIGSTOP] = TARGET_SIGSTOP,
69 [SIGTSTP] = TARGET_SIGTSTP,
70 [SIGTTIN] = TARGET_SIGTTIN,
71 [SIGTTOU] = TARGET_SIGTTOU,
72 [SIGURG] = TARGET_SIGURG,
73 [SIGXCPU] = TARGET_SIGXCPU,
74 [SIGXFSZ] = TARGET_SIGXFSZ,
75 [SIGVTALRM] = TARGET_SIGVTALRM,
76 [SIGPROF] = TARGET_SIGPROF,
77 [SIGWINCH] = TARGET_SIGWINCH,
78 [SIGIO] = TARGET_SIGIO,
79 [SIGPWR] = TARGET_SIGPWR,
80 [SIGSYS] = TARGET_SIGSYS,
81 /* next signals stay the same */
82 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
83 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
84 To fix this properly we need to do manual signal delivery multiplexed
85 over a single host signal. */
86 [__SIGRTMIN] = __SIGRTMAX,
87 [__SIGRTMAX] = __SIGRTMIN,
88 };
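/* Host signals without an explicit entry above are mapped to themselves, and
   target_to_host_signal_table below is derived as the inverse of this table;
   both steps are performed in signal_init(). */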
89 static uint8_t target_to_host_signal_table[_NSIG];
90
91 static inline int on_sig_stack(unsigned long sp)
92 {
93 return (sp - target_sigaltstack_used.ss_sp
94 < target_sigaltstack_used.ss_size);
95 }
96
97 static inline int sas_ss_flags(unsigned long sp)
98 {
99 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
100 : on_sig_stack(sp) ? SS_ONSTACK : 0);
101 }
102
103 int host_to_target_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return host_to_target_signal_table[sig];
108 }
109
110 int target_to_host_signal(int sig)
111 {
112 if (sig < 0 || sig >= _NSIG)
113 return sig;
114 return target_to_host_signal_table[sig];
115 }
116
117 static inline void target_sigemptyset(target_sigset_t *set)
118 {
119 memset(set, 0, sizeof(*set));
120 }
121
122 static inline void target_sigaddset(target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 set->sig[signum / TARGET_NSIG_BPW] |= mask;
127 }
128
129 static inline int target_sigismember(const target_sigset_t *set, int signum)
130 {
131 signum--;
132 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
133 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
134 }
135
136 static void host_to_target_sigset_internal(target_sigset_t *d,
137 const sigset_t *s)
138 {
139 int i;
140 target_sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (sigismember(s, i)) {
143 target_sigaddset(d, host_to_target_signal(i));
144 }
145 }
146 }
147
148 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
149 {
150 target_sigset_t d1;
151 int i;
152
153 host_to_target_sigset_internal(&d1, s);
154 for(i = 0;i < TARGET_NSIG_WORDS; i++)
155 d->sig[i] = tswapal(d1.sig[i]);
156 }
157
158 static void target_to_host_sigset_internal(sigset_t *d,
159 const target_sigset_t *s)
160 {
161 int i;
162 sigemptyset(d);
163 for (i = 1; i <= TARGET_NSIG; i++) {
164 if (target_sigismember(s, i)) {
165 sigaddset(d, target_to_host_signal(i));
166 }
167 }
168 }
169
170 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
171 {
172 target_sigset_t s1;
173 int i;
174
175 for(i = 0;i < TARGET_NSIG_WORDS; i++)
176 s1.sig[i] = tswapal(s->sig[i]);
177 target_to_host_sigset_internal(d, &s1);
178 }
179
180 void host_to_target_old_sigset(abi_ulong *old_sigset,
181 const sigset_t *sigset)
182 {
183 target_sigset_t d;
184 host_to_target_sigset(&d, sigset);
185 *old_sigset = d.sig[0];
186 }
187
188 void target_to_host_old_sigset(sigset_t *sigset,
189 const abi_ulong *old_sigset)
190 {
191 target_sigset_t d;
192 int i;
193
194 d.sig[0] = *old_sigset;
195 for(i = 1;i < TARGET_NSIG_WORDS; i++)
196 d.sig[i] = 0;
197 target_to_host_sigset(sigset, &d);
198 }
199
200 /* Wrapper for sigprocmask function
201 * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
202 * are host signal sets, not guest ones. This wraps the sigprocmask host calls
203 * that should be protected (calls originating from the guest)
204 */
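/* SIGSEGV is deliberately stripped from the host mask below: the emulator
   relies on receiving host SIGSEGV (see host_signal_handler) to detect guest
   memory faults, so the guest-visible blocked state is only tracked in
   ts->sigsegv_blocked and honoured later in queue_signal(). */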
205 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
206 {
207 int ret;
208 sigset_t val;
209 sigset_t *temp = NULL;
210 CPUState *cpu = thread_cpu;
211 TaskState *ts = (TaskState *)cpu->opaque;
212 bool segv_was_blocked = ts->sigsegv_blocked;
213
214 if (set) {
215 bool has_sigsegv = sigismember(set, SIGSEGV);
216 val = *set;
217 temp = &val;
218
219 sigdelset(temp, SIGSEGV);
220
221 switch (how) {
222 case SIG_BLOCK:
223 if (has_sigsegv) {
224 ts->sigsegv_blocked = true;
225 }
226 break;
227 case SIG_UNBLOCK:
228 if (has_sigsegv) {
229 ts->sigsegv_blocked = false;
230 }
231 break;
232 case SIG_SETMASK:
233 ts->sigsegv_blocked = has_sigsegv;
234 break;
235 default:
236 g_assert_not_reached();
237 }
238 }
239
240 ret = sigprocmask(how, temp, oldset);
241
242 if (oldset && segv_was_blocked) {
243 sigaddset(oldset, SIGSEGV);
244 }
245
246 return ret;
247 }
248
249 /* siginfo conversion */
250
251 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
252 const siginfo_t *info)
253 {
254 int sig = host_to_target_signal(info->si_signo);
255 tinfo->si_signo = sig;
256 tinfo->si_errno = 0;
257 tinfo->si_code = info->si_code;
258
259 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
260 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
261 /* Should never come here, but who knows. The information for
262 the target is irrelevant. */
263 tinfo->_sifields._sigfault._addr = 0;
264 } else if (sig == TARGET_SIGIO) {
265 tinfo->_sifields._sigpoll._band = info->si_band;
266 tinfo->_sifields._sigpoll._fd = info->si_fd;
267 } else if (sig == TARGET_SIGCHLD) {
268 tinfo->_sifields._sigchld._pid = info->si_pid;
269 tinfo->_sifields._sigchld._uid = info->si_uid;
270 tinfo->_sifields._sigchld._status
271 = host_to_target_waitstatus(info->si_status);
272 tinfo->_sifields._sigchld._utime = info->si_utime;
273 tinfo->_sifields._sigchld._stime = info->si_stime;
274 } else if (sig >= TARGET_SIGRTMIN) {
275 tinfo->_sifields._rt._pid = info->si_pid;
276 tinfo->_sifields._rt._uid = info->si_uid;
277 /* XXX: potential problem if 64 bit */
278 tinfo->_sifields._rt._sigval.sival_ptr
279 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
280 }
281 }
282
283 static void tswap_siginfo(target_siginfo_t *tinfo,
284 const target_siginfo_t *info)
285 {
286 int sig = info->si_signo;
287 tinfo->si_signo = tswap32(sig);
288 tinfo->si_errno = tswap32(info->si_errno);
289 tinfo->si_code = tswap32(info->si_code);
290
291 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
292 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
293 tinfo->_sifields._sigfault._addr
294 = tswapal(info->_sifields._sigfault._addr);
295 } else if (sig == TARGET_SIGIO) {
296 tinfo->_sifields._sigpoll._band
297 = tswap32(info->_sifields._sigpoll._band);
298 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
299 } else if (sig == TARGET_SIGCHLD) {
300 tinfo->_sifields._sigchld._pid
301 = tswap32(info->_sifields._sigchld._pid);
302 tinfo->_sifields._sigchld._uid
303 = tswap32(info->_sifields._sigchld._uid);
304 tinfo->_sifields._sigchld._status
305 = tswap32(info->_sifields._sigchld._status);
306 tinfo->_sifields._sigchld._utime
307 = tswapal(info->_sifields._sigchld._utime);
308 tinfo->_sifields._sigchld._stime
309 = tswapal(info->_sifields._sigchld._stime);
310 } else if (sig >= TARGET_SIGRTMIN) {
311 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
312 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
313 tinfo->_sifields._rt._sigval.sival_ptr
314 = tswapal(info->_sifields._rt._sigval.sival_ptr);
315 }
316 }
317
318
319 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
320 {
321 host_to_target_siginfo_noswap(tinfo, info);
322 tswap_siginfo(tinfo, tinfo);
323 }
324
325 /* XXX: we only support the case where POSIX RT signals are used. */
326 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
327 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
328 {
329 info->si_signo = tswap32(tinfo->si_signo);
330 info->si_errno = tswap32(tinfo->si_errno);
331 info->si_code = tswap32(tinfo->si_code);
332 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
333 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
334 info->si_value.sival_ptr =
335 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
336 }
337
338 static int fatal_signal (int sig)
339 {
340 switch (sig) {
341 case TARGET_SIGCHLD:
342 case TARGET_SIGURG:
343 case TARGET_SIGWINCH:
344 /* Ignored by default. */
345 return 0;
346 case TARGET_SIGCONT:
347 case TARGET_SIGSTOP:
348 case TARGET_SIGTSTP:
349 case TARGET_SIGTTIN:
350 case TARGET_SIGTTOU:
351 /* Job control signals. */
352 return 0;
353 default:
354 return 1;
355 }
356 }
357
358 /* returns 1 if given signal should dump core if not handled */
359 static int core_dump_signal(int sig)
360 {
361 switch (sig) {
362 case TARGET_SIGABRT:
363 case TARGET_SIGFPE:
364 case TARGET_SIGILL:
365 case TARGET_SIGQUIT:
366 case TARGET_SIGSEGV:
367 case TARGET_SIGTRAP:
368 case TARGET_SIGBUS:
369 return (1);
370 default:
371 return (0);
372 }
373 }
374
375 void signal_init(void)
376 {
377 struct sigaction act;
378 struct sigaction oact;
379 int i, j;
380 int host_sig;
381
382 /* generate signal conversion tables */
383 for(i = 1; i < _NSIG; i++) {
384 if (host_to_target_signal_table[i] == 0)
385 host_to_target_signal_table[i] = i;
386 }
387 for(i = 1; i < _NSIG; i++) {
388 j = host_to_target_signal_table[i];
389 target_to_host_signal_table[j] = i;
390 }
391
392 /* set all host signal handlers. ALL signals are blocked during
393 the handlers to serialize them. */
394 memset(sigact_table, 0, sizeof(sigact_table));
395
396 sigfillset(&act.sa_mask);
397 act.sa_flags = SA_SIGINFO;
398 act.sa_sigaction = host_signal_handler;
399 for(i = 1; i <= TARGET_NSIG; i++) {
400 host_sig = target_to_host_signal(i);
401 sigaction(host_sig, NULL, &oact);
402 if (oact.sa_sigaction == (void *)SIG_IGN) {
403 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
404 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
405 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
406 }
407 /* If there's already a handler installed then something has
408 gone horribly wrong, so don't even try to handle that case. */
409 /* Install some handlers for our own use. We need at least
410 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
411 trap all signals because it affects syscall interrupt
412 behavior. But do trap all default-fatal signals. */
413 if (fatal_signal (i))
414 sigaction(host_sig, &act, NULL);
415 }
416 }
417
418 /* signal queue handling */
419
420 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
421 {
422 CPUState *cpu = ENV_GET_CPU(env);
423 TaskState *ts = cpu->opaque;
424 struct sigqueue *q = ts->first_free;
425 if (!q)
426 return NULL;
427 ts->first_free = q->next;
428 return q;
429 }
430
431 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
432 {
433 CPUState *cpu = ENV_GET_CPU(env);
434 TaskState *ts = cpu->opaque;
435
436 q->next = ts->first_free;
437 ts->first_free = q;
438 }
439
440 /* abort execution with signal */
441 static void QEMU_NORETURN force_sig(int target_sig)
442 {
443 CPUState *cpu = thread_cpu;
444 CPUArchState *env = cpu->env_ptr;
445 TaskState *ts = (TaskState *)cpu->opaque;
446 int host_sig, core_dumped = 0;
447 struct sigaction act;
448 host_sig = target_to_host_signal(target_sig);
449 gdb_signalled(env, target_sig);
450
451 /* dump core if supported by target binary format */
452 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
453 stop_all_tasks();
454 core_dumped =
455 ((*ts->bprm->core_dump)(target_sig, env) == 0);
456 }
457 if (core_dumped) {
458 /* we already dumped the core of the target process, we don't want
459 * a coredump of qemu itself */
460 struct rlimit nodump;
461 getrlimit(RLIMIT_CORE, &nodump);
462 nodump.rlim_cur=0;
463 setrlimit(RLIMIT_CORE, &nodump);
464 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
465 target_sig, strsignal(host_sig), "core dumped" );
466 }
467
468 /* The proper exit code for dying from an uncaught signal is
469 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
470 * a negative value. To get the proper exit code we need to
471 * actually die from an uncaught signal. So we install the default
472 * signal handler, send ourselves the signal and wait for it to
473 * arrive. */
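    /* A parent that wait()s on this process then sees WIFSIGNALED(status)
       with WTERMSIG(status) == host_sig, just as for a natively killed
       process. */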
474 sigfillset(&act.sa_mask);
475 act.sa_handler = SIG_DFL;
476 act.sa_flags = 0;
477 sigaction(host_sig, &act, NULL);
478
479 /* For some reason raise(host_sig) doesn't send the signal when
480 * statically linked on x86-64. */
481 kill(getpid(), host_sig);
482
483 /* Make sure the signal isn't masked (just reuse the mask inside
484 of act) */
485 sigdelset(&act.sa_mask, host_sig);
486 sigsuspend(&act.sa_mask);
487
488 /* unreachable */
489 abort();
490 }
491
492 /* queue a signal so that it will be sent to the virtual CPU as soon
493 as possible */
494 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
495 {
496 CPUState *cpu = ENV_GET_CPU(env);
497 TaskState *ts = cpu->opaque;
498 struct emulated_sigtable *k;
499 struct sigqueue *q, **pq;
500 abi_ulong handler;
501 int queue;
502
503 #if defined(DEBUG_SIGNAL)
504 fprintf(stderr, "queue_signal: sig=%d\n",
505 sig);
506 #endif
507 k = &ts->sigtab[sig - 1];
508 queue = gdb_queuesig ();
509 handler = sigact_table[sig - 1]._sa_handler;
510
511 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
512 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
513 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
514 * because it got a real MMU fault). A blocked SIGSEGV in that
515 * situation is treated as if using the default handler. This is
516 * not correct if some other process has randomly sent us a SIGSEGV
517 * via kill(), but that is not easy to distinguish at this point,
518 * so we assume it doesn't happen.
519 */
520 handler = TARGET_SIG_DFL;
521 }
522
523 if (!queue && handler == TARGET_SIG_DFL) {
524 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
525 kill(getpid(),SIGSTOP);
526 return 0;
527 } else
528 /* default handler: ignore some signals. The others are fatal */
529 if (sig != TARGET_SIGCHLD &&
530 sig != TARGET_SIGURG &&
531 sig != TARGET_SIGWINCH &&
532 sig != TARGET_SIGCONT) {
533 force_sig(sig);
534 } else {
535 return 0; /* indicate ignored */
536 }
537 } else if (!queue && handler == TARGET_SIG_IGN) {
538 /* ignore signal */
539 return 0;
540 } else if (!queue && handler == TARGET_SIG_ERR) {
541 force_sig(sig);
542 } else {
543 pq = &k->first;
544 if (sig < TARGET_SIGRTMIN) {
545 /* if it is a non-real-time signal, we queue exactly one signal */
546 if (!k->pending)
547 q = &k->info;
548 else
549 return 0;
550 } else {
551 if (!k->pending) {
552 /* first signal */
553 q = &k->info;
554 } else {
555 q = alloc_sigqueue(env);
556 if (!q)
557 return -EAGAIN;
558 while (*pq != NULL)
559 pq = &(*pq)->next;
560 }
561 }
562 *pq = q;
563 q->info = *info;
564 q->next = NULL;
565 k->pending = 1;
566 /* signal that a new signal is pending */
567 ts->signal_pending = 1;
568 return 1; /* indicates that the signal was queued */
569 }
570 }
571
572 static void host_signal_handler(int host_signum, siginfo_t *info,
573 void *puc)
574 {
575 CPUArchState *env = thread_cpu->env_ptr;
576 int sig;
577 target_siginfo_t tinfo;
578
579 /* the CPU emulator uses some host signals to detect exceptions;
580 we forward those signals to it */
581 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
582 && info->si_code > 0) {
583 if (cpu_signal_handler(host_signum, info, puc))
584 return;
585 }
586
587 /* get target signal number */
588 sig = host_to_target_signal(host_signum);
589 if (sig < 1 || sig > TARGET_NSIG)
590 return;
591 #if defined(DEBUG_SIGNAL)
592 fprintf(stderr, "qemu: got signal %d\n", sig);
593 #endif
594 host_to_target_siginfo_noswap(&tinfo, info);
595 if (queue_signal(env, sig, &tinfo) == 1) {
596 /* interrupt the virtual CPU as soon as possible */
597 cpu_exit(thread_cpu);
598 }
599 }
600
601 /* do_sigaltstack() returns target values and errnos. */
602 /* compare linux/kernel/signal.c:do_sigaltstack() */
603 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
604 {
605 int ret;
606 struct target_sigaltstack oss;
607
608 /* XXX: test errors */
609 if(uoss_addr)
610 {
611 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
612 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
613 __put_user(sas_ss_flags(sp), &oss.ss_flags);
614 }
615
616 if(uss_addr)
617 {
618 struct target_sigaltstack *uss;
619 struct target_sigaltstack ss;
620
621 ret = -TARGET_EFAULT;
622 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
623 goto out;
624 }
625 __get_user(ss.ss_sp, &uss->ss_sp);
626 __get_user(ss.ss_size, &uss->ss_size);
627 __get_user(ss.ss_flags, &uss->ss_flags);
628 unlock_user_struct(uss, uss_addr, 0);
629
630 ret = -TARGET_EPERM;
631 if (on_sig_stack(sp))
632 goto out;
633
634 ret = -TARGET_EINVAL;
635 if (ss.ss_flags != TARGET_SS_DISABLE
636 && ss.ss_flags != TARGET_SS_ONSTACK
637 && ss.ss_flags != 0)
638 goto out;
639
640 if (ss.ss_flags == TARGET_SS_DISABLE) {
641 ss.ss_size = 0;
642 ss.ss_sp = 0;
643 } else {
644 ret = -TARGET_ENOMEM;
645 if (ss.ss_size < MINSIGSTKSZ)
646 goto out;
647 }
648
649 target_sigaltstack_used.ss_sp = ss.ss_sp;
650 target_sigaltstack_used.ss_size = ss.ss_size;
651 }
652
653 if (uoss_addr) {
654 ret = -TARGET_EFAULT;
655 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
656 goto out;
657 }
658
659 ret = 0;
660 out:
661 return ret;
662 }
663
664 /* do_sigaction() return host values and errnos */
665 int do_sigaction(int sig, const struct target_sigaction *act,
666 struct target_sigaction *oact)
667 {
668 struct target_sigaction *k;
669 struct sigaction act1;
670 int host_sig;
671 int ret = 0;
672
673 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
674 return -EINVAL;
675 k = &sigact_table[sig - 1];
676 #if defined(DEBUG_SIGNAL)
677 fprintf(stderr, "sigaction sig=%d act=0x%p, oact=0x%p\n",
678 sig, act, oact);
679 #endif
680 if (oact) {
681 __put_user(k->_sa_handler, &oact->_sa_handler);
682 __put_user(k->sa_flags, &oact->sa_flags);
683 #if !defined(TARGET_MIPS)
684 __put_user(k->sa_restorer, &oact->sa_restorer);
685 #endif
686 /* Not swapped. */
687 oact->sa_mask = k->sa_mask;
688 }
689 if (act) {
690 /* FIXME: This is not threadsafe. */
691 __get_user(k->_sa_handler, &act->_sa_handler);
692 __get_user(k->sa_flags, &act->sa_flags);
693 #if !defined(TARGET_MIPS)
694 __get_user(k->sa_restorer, &act->sa_restorer);
695 #endif
696 /* To be swapped in target_to_host_sigset. */
697 k->sa_mask = act->sa_mask;
698
699 /* we update the host linux signal state */
700 host_sig = target_to_host_signal(sig);
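        /* Never replace the host handlers for SIGSEGV/SIGBUS here:
           signal_init() installed host_signal_handler on them and the
           emulator needs those signals to detect guest faults. */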
701 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
702 sigfillset(&act1.sa_mask);
703 act1.sa_flags = SA_SIGINFO;
704 if (k->sa_flags & TARGET_SA_RESTART)
705 act1.sa_flags |= SA_RESTART;
706 /* NOTE: it is important to update the host kernel signal
707 ignore state to avoid getting unexpectedly interrupted
708 syscalls */
709 if (k->_sa_handler == TARGET_SIG_IGN) {
710 act1.sa_sigaction = (void *)SIG_IGN;
711 } else if (k->_sa_handler == TARGET_SIG_DFL) {
712 if (fatal_signal (sig))
713 act1.sa_sigaction = host_signal_handler;
714 else
715 act1.sa_sigaction = (void *)SIG_DFL;
716 } else {
717 act1.sa_sigaction = host_signal_handler;
718 }
719 ret = sigaction(host_sig, &act1, NULL);
720 }
721 }
722 return ret;
723 }
724
725 static inline void copy_siginfo_to_user(target_siginfo_t *tinfo,
726 const target_siginfo_t *info)
727 {
728 tswap_siginfo(tinfo, info);
729 }
730
731 static inline int current_exec_domain_sig(int sig)
732 {
733 return /* current->exec_domain && current->exec_domain->signal_invmap
734 && sig < 32 ? current->exec_domain->signal_invmap[sig] : */ sig;
735 }
736
737 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
738
739 /* from the Linux kernel */
740
741 struct target_fpreg {
742 uint16_t significand[4];
743 uint16_t exponent;
744 };
745
746 struct target_fpxreg {
747 uint16_t significand[4];
748 uint16_t exponent;
749 uint16_t padding[3];
750 };
751
752 struct target_xmmreg {
753 abi_ulong element[4];
754 };
755
756 struct target_fpstate {
757 /* Regular FPU environment */
758 abi_ulong cw;
759 abi_ulong sw;
760 abi_ulong tag;
761 abi_ulong ipoff;
762 abi_ulong cssel;
763 abi_ulong dataoff;
764 abi_ulong datasel;
765 struct target_fpreg _st[8];
766 uint16_t status;
767 uint16_t magic; /* 0xffff = regular FPU data only */
768
769 /* FXSR FPU environment */
770 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
771 abi_ulong mxcsr;
772 abi_ulong reserved;
773 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
774 struct target_xmmreg _xmm[8];
775 abi_ulong padding[56];
776 };
777
778 #define X86_FXSR_MAGIC 0x0000
779
780 struct target_sigcontext {
781 uint16_t gs, __gsh;
782 uint16_t fs, __fsh;
783 uint16_t es, __esh;
784 uint16_t ds, __dsh;
785 abi_ulong edi;
786 abi_ulong esi;
787 abi_ulong ebp;
788 abi_ulong esp;
789 abi_ulong ebx;
790 abi_ulong edx;
791 abi_ulong ecx;
792 abi_ulong eax;
793 abi_ulong trapno;
794 abi_ulong err;
795 abi_ulong eip;
796 uint16_t cs, __csh;
797 abi_ulong eflags;
798 abi_ulong esp_at_signal;
799 uint16_t ss, __ssh;
800 abi_ulong fpstate; /* pointer */
801 abi_ulong oldmask;
802 abi_ulong cr2;
803 };
804
805 struct target_ucontext {
806 abi_ulong tuc_flags;
807 abi_ulong tuc_link;
808 target_stack_t tuc_stack;
809 struct target_sigcontext tuc_mcontext;
810 target_sigset_t tuc_sigmask; /* mask last for extensibility */
811 };
812
813 struct sigframe
814 {
815 abi_ulong pretcode;
816 int sig;
817 struct target_sigcontext sc;
818 struct target_fpstate fpstate;
819 abi_ulong extramask[TARGET_NSIG_WORDS-1];
820 char retcode[8];
821 };
822
823 struct rt_sigframe
824 {
825 abi_ulong pretcode;
826 int sig;
827 abi_ulong pinfo;
828 abi_ulong puc;
829 struct target_siginfo info;
830 struct target_ucontext uc;
831 struct target_fpstate fpstate;
832 char retcode[8];
833 };
834
835 /*
836 * Set up a signal frame.
837 */
838
839 /* XXX: save x87 state */
840 static void setup_sigcontext(struct target_sigcontext *sc,
841 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
842 abi_ulong fpstate_addr)
843 {
844 CPUState *cs = CPU(x86_env_get_cpu(env));
845 uint16_t magic;
846
847 /* already locked in setup_frame() */
848 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
849 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
850 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
851 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
852 __put_user(env->regs[R_EDI], &sc->edi);
853 __put_user(env->regs[R_ESI], &sc->esi);
854 __put_user(env->regs[R_EBP], &sc->ebp);
855 __put_user(env->regs[R_ESP], &sc->esp);
856 __put_user(env->regs[R_EBX], &sc->ebx);
857 __put_user(env->regs[R_EDX], &sc->edx);
858 __put_user(env->regs[R_ECX], &sc->ecx);
859 __put_user(env->regs[R_EAX], &sc->eax);
860 __put_user(cs->exception_index, &sc->trapno);
861 __put_user(env->error_code, &sc->err);
862 __put_user(env->eip, &sc->eip);
863 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
864 __put_user(env->eflags, &sc->eflags);
865 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
866 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
867
868 cpu_x86_fsave(env, fpstate_addr, 1);
869 fpstate->status = fpstate->sw;
870 magic = 0xffff;
871 __put_user(magic, &fpstate->magic);
872 __put_user(fpstate_addr, &sc->fpstate);
873
874 /* non-iBCS2 extensions.. */
875 __put_user(mask, &sc->oldmask);
876 __put_user(env->cr[2], &sc->cr2);
877 }
878
879 /*
880 * Determine which stack to use..
881 */
882
883 static inline abi_ulong
884 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
885 {
886 unsigned long esp;
887
888 /* Default to using normal stack */
889 esp = env->regs[R_ESP];
890 /* This is the X/Open sanctioned signal stack switching. */
891 if (ka->sa_flags & TARGET_SA_ONSTACK) {
892 if (sas_ss_flags(esp) == 0)
893 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
894 }
895
896 /* This is the legacy signal stack switching. */
897 else
898 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
899 !(ka->sa_flags & TARGET_SA_RESTORER) &&
900 ka->sa_restorer) {
901 esp = (unsigned long) ka->sa_restorer;
902 }
903 return (esp - frame_size) & -8ul;
904 }
905
906 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
907 static void setup_frame(int sig, struct target_sigaction *ka,
908 target_sigset_t *set, CPUX86State *env)
909 {
910 abi_ulong frame_addr;
911 struct sigframe *frame;
912 int i;
913
914 frame_addr = get_sigframe(ka, env, sizeof(*frame));
915
916 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
917 goto give_sigsegv;
918
919 __put_user(current_exec_domain_sig(sig),
920 &frame->sig);
921
922 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
923 frame_addr + offsetof(struct sigframe, fpstate));
924
925 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
926 __put_user(set->sig[i], &frame->extramask[i - 1]);
927 }
928
929 /* Set up to return from userspace. If provided, use a stub
930 already in userspace. */
931 if (ka->sa_flags & TARGET_SA_RESTORER) {
932 __put_user(ka->sa_restorer, &frame->pretcode);
933 } else {
934 uint16_t val16;
935 abi_ulong retcode_addr;
936 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
937 __put_user(retcode_addr, &frame->pretcode);
938 /* This is popl %eax ; movl $,%eax ; int $0x80 */
939 val16 = 0xb858;
940 __put_user(val16, (uint16_t *)(frame->retcode+0));
941 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
942 val16 = 0x80cd;
943 __put_user(val16, (uint16_t *)(frame->retcode+6));
944 }
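    /* The retcode bytes stored above decode (little-endian stores) as:
       0x58            popl %eax
       0xb8 imm32      movl $TARGET_NR_sigreturn,%eax
       0xcd 0x80       int  $0x80 */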
945
946
947 /* Set up registers for signal handler */
948 env->regs[R_ESP] = frame_addr;
949 env->eip = ka->_sa_handler;
950
951 cpu_x86_load_seg(env, R_DS, __USER_DS);
952 cpu_x86_load_seg(env, R_ES, __USER_DS);
953 cpu_x86_load_seg(env, R_SS, __USER_DS);
954 cpu_x86_load_seg(env, R_CS, __USER_CS);
955 env->eflags &= ~TF_MASK;
956
957 unlock_user_struct(frame, frame_addr, 1);
958
959 return;
960
961 give_sigsegv:
962 if (sig == TARGET_SIGSEGV)
963 ka->_sa_handler = TARGET_SIG_DFL;
964 force_sig(TARGET_SIGSEGV /* , current */);
965 }
966
967 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
968 static void setup_rt_frame(int sig, struct target_sigaction *ka,
969 target_siginfo_t *info,
970 target_sigset_t *set, CPUX86State *env)
971 {
972 abi_ulong frame_addr, addr;
973 struct rt_sigframe *frame;
974 int i;
975
976 frame_addr = get_sigframe(ka, env, sizeof(*frame));
977
978 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
979 goto give_sigsegv;
980
981 __put_user(current_exec_domain_sig(sig), &frame->sig);
982 addr = frame_addr + offsetof(struct rt_sigframe, info);
983 __put_user(addr, &frame->pinfo);
984 addr = frame_addr + offsetof(struct rt_sigframe, uc);
985 __put_user(addr, &frame->puc);
986 copy_siginfo_to_user(&frame->info, info);
987
988 /* Create the ucontext. */
989 __put_user(0, &frame->uc.tuc_flags);
990 __put_user(0, &frame->uc.tuc_link);
991 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
992 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
993 &frame->uc.tuc_stack.ss_flags);
994 __put_user(target_sigaltstack_used.ss_size,
995 &frame->uc.tuc_stack.ss_size);
996 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
997 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
998
999 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1000 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1001 }
1002
1003 /* Set up to return from userspace. If provided, use a stub
1004 already in userspace. */
1005 if (ka->sa_flags & TARGET_SA_RESTORER) {
1006 __put_user(ka->sa_restorer, &frame->pretcode);
1007 } else {
1008 uint16_t val16;
1009 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1010 __put_user(addr, &frame->pretcode);
1011 /* This is movl $,%eax ; int $0x80 */
1012 __put_user(0xb8, (char *)(frame->retcode+0));
1013 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1014 val16 = 0x80cd;
1015 __put_user(val16, (uint16_t *)(frame->retcode+5));
1016 }
1017
1018 /* Set up registers for signal handler */
1019 env->regs[R_ESP] = frame_addr;
1020 env->eip = ka->_sa_handler;
1021
1022 cpu_x86_load_seg(env, R_DS, __USER_DS);
1023 cpu_x86_load_seg(env, R_ES, __USER_DS);
1024 cpu_x86_load_seg(env, R_SS, __USER_DS);
1025 cpu_x86_load_seg(env, R_CS, __USER_CS);
1026 env->eflags &= ~TF_MASK;
1027
1028 unlock_user_struct(frame, frame_addr, 1);
1029
1030 return;
1031
1032 give_sigsegv:
1033 if (sig == TARGET_SIGSEGV)
1034 ka->_sa_handler = TARGET_SIG_DFL;
1035 force_sig(TARGET_SIGSEGV /* , current */);
1036 }
1037
1038 static int
1039 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
1040 {
1041 unsigned int err = 0;
1042 abi_ulong fpstate_addr;
1043 unsigned int tmpflags;
1044
1045 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1046 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1047 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1048 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1049
1050 env->regs[R_EDI] = tswapl(sc->edi);
1051 env->regs[R_ESI] = tswapl(sc->esi);
1052 env->regs[R_EBP] = tswapl(sc->ebp);
1053 env->regs[R_ESP] = tswapl(sc->esp);
1054 env->regs[R_EBX] = tswapl(sc->ebx);
1055 env->regs[R_EDX] = tswapl(sc->edx);
1056 env->regs[R_ECX] = tswapl(sc->ecx);
1057 env->eip = tswapl(sc->eip);
1058
1059 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1060 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1061
1062 tmpflags = tswapl(sc->eflags);
1063 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1064 // regs->orig_eax = -1; /* disable syscall checks */
1065
1066 fpstate_addr = tswapl(sc->fpstate);
1067 if (fpstate_addr != 0) {
1068 if (!access_ok(VERIFY_READ, fpstate_addr,
1069 sizeof(struct target_fpstate)))
1070 goto badframe;
1071 cpu_x86_frstor(env, fpstate_addr, 1);
1072 }
1073
1074 *peax = tswapl(sc->eax);
1075 return err;
1076 badframe:
1077 return 1;
1078 }
1079
1080 long do_sigreturn(CPUX86State *env)
1081 {
1082 struct sigframe *frame;
1083 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1084 target_sigset_t target_set;
1085 sigset_t set;
1086 int eax, i;
1087
1088 #if defined(DEBUG_SIGNAL)
1089 fprintf(stderr, "do_sigreturn\n");
1090 #endif
1091 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1092 goto badframe;
1093 /* set blocked signals */
1094 __get_user(target_set.sig[0], &frame->sc.oldmask);
1095 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1096 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1097 }
1098
1099 target_to_host_sigset_internal(&set, &target_set);
1100 do_sigprocmask(SIG_SETMASK, &set, NULL);
1101
1102 /* restore registers */
1103 if (restore_sigcontext(env, &frame->sc, &eax))
1104 goto badframe;
1105 unlock_user_struct(frame, frame_addr, 0);
1106 return eax;
1107
1108 badframe:
1109 unlock_user_struct(frame, frame_addr, 0);
1110 force_sig(TARGET_SIGSEGV);
1111 return 0;
1112 }
1113
1114 long do_rt_sigreturn(CPUX86State *env)
1115 {
1116 abi_ulong frame_addr;
1117 struct rt_sigframe *frame;
1118 sigset_t set;
1119 int eax;
1120
1121 frame_addr = env->regs[R_ESP] - 4;
1122 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1123 goto badframe;
1124 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1125 do_sigprocmask(SIG_SETMASK, &set, NULL);
1126
1127 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
1128 goto badframe;
1129
1130 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1131 get_sp_from_cpustate(env)) == -EFAULT)
1132 goto badframe;
1133
1134 unlock_user_struct(frame, frame_addr, 0);
1135 return eax;
1136
1137 badframe:
1138 unlock_user_struct(frame, frame_addr, 0);
1139 force_sig(TARGET_SIGSEGV);
1140 return 0;
1141 }
1142
1143 #elif defined(TARGET_AARCH64)
1144
1145 struct target_sigcontext {
1146 uint64_t fault_address;
1147 /* AArch64 registers */
1148 uint64_t regs[31];
1149 uint64_t sp;
1150 uint64_t pc;
1151 uint64_t pstate;
1152 /* 4K reserved for FP/SIMD state and future expansion */
1153 char __reserved[4096] __attribute__((__aligned__(16)));
1154 };
1155
1156 struct target_ucontext {
1157 abi_ulong tuc_flags;
1158 abi_ulong tuc_link;
1159 target_stack_t tuc_stack;
1160 target_sigset_t tuc_sigmask;
1161 /* glibc uses a 1024-bit sigset_t */
1162 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1163 /* last for future expansion */
1164 struct target_sigcontext tuc_mcontext;
1165 };
1166
1167 /*
1168 * Header to be used at the beginning of structures extending the user
1169 * context. Such structures must be placed after the rt_sigframe on the stack
1170 * and be 16-byte aligned. The last structure must be a dummy one with the
1171 * magic and size set to 0.
1172 */
1173 struct target_aarch64_ctx {
1174 uint32_t magic;
1175 uint32_t size;
1176 };
1177
1178 #define TARGET_FPSIMD_MAGIC 0x46508001
1179
1180 struct target_fpsimd_context {
1181 struct target_aarch64_ctx head;
1182 uint32_t fpsr;
1183 uint32_t fpcr;
1184 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1185 };
1186
1187 /*
1188 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1189 * user space as it will change with the addition of new context. User space
1190 * should check the magic/size information.
1191 */
1192 struct target_aux_context {
1193 struct target_fpsimd_context fpsimd;
1194 /* additional context to be added before "end" */
1195 struct target_aarch64_ctx end;
1196 };
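/* Only the fpsimd record is ever filled in by target_setup_sigframe() below;
   "end" is the all-zero terminator required by the struct target_aarch64_ctx
   convention described above. */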
1197
1198 struct target_rt_sigframe {
1199 struct target_siginfo info;
1200 struct target_ucontext uc;
1201 uint64_t fp;
1202 uint64_t lr;
1203 uint32_t tramp[2];
1204 };
1205
1206 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1207 CPUARMState *env, target_sigset_t *set)
1208 {
1209 int i;
1210 struct target_aux_context *aux =
1211 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1212
1213 /* set up the stack frame for unwinding */
1214 __put_user(env->xregs[29], &sf->fp);
1215 __put_user(env->xregs[30], &sf->lr);
1216
1217 for (i = 0; i < 31; i++) {
1218 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1219 }
1220 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1221 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1222 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1223
1224 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1225
1226 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1227 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1228 }
1229
1230 for (i = 0; i < 32; i++) {
1231 #ifdef TARGET_WORDS_BIGENDIAN
1232 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1233 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1234 #else
1235 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1236 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1237 #endif
1238 }
1239 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1240 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1241 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1242 __put_user(sizeof(struct target_fpsimd_context),
1243 &aux->fpsimd.head.size);
1244
1245 /* set the "end" magic */
1246 __put_user(0, &aux->end.magic);
1247 __put_user(0, &aux->end.size);
1248
1249 return 0;
1250 }
1251
1252 static int target_restore_sigframe(CPUARMState *env,
1253 struct target_rt_sigframe *sf)
1254 {
1255 sigset_t set;
1256 int i;
1257 struct target_aux_context *aux =
1258 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1259 uint32_t magic, size, fpsr, fpcr;
1260 uint64_t pstate;
1261
1262 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1263 do_sigprocmask(SIG_SETMASK, &set, NULL);
1264
1265 for (i = 0; i < 31; i++) {
1266 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1267 }
1268
1269 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1270 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1271 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1272 pstate_write(env, pstate);
1273
1274 __get_user(magic, &aux->fpsimd.head.magic);
1275 __get_user(size, &aux->fpsimd.head.size);
1276
1277 if (magic != TARGET_FPSIMD_MAGIC
1278 || size != sizeof(struct target_fpsimd_context)) {
1279 return 1;
1280 }
1281
1282 for (i = 0; i < 32; i++) {
1283 #ifdef TARGET_WORDS_BIGENDIAN
1284 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1285 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1286 #else
1287 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1288 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1289 #endif
1290 }
1291 __get_user(fpsr, &aux->fpsimd.fpsr);
1292 vfp_set_fpsr(env, fpsr);
1293 __get_user(fpcr, &aux->fpsimd.fpcr);
1294 vfp_set_fpcr(env, fpcr);
1295
1296 return 0;
1297 }
1298
1299 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1300 {
1301 abi_ulong sp;
1302
1303 sp = env->xregs[31];
1304
1305 /*
1306 * This is the X/Open sanctioned signal stack switching.
1307 */
1308 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
1309 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1310 }
1311
1312 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1313
1314 return sp;
1315 }
1316
1317 static void target_setup_frame(int usig, struct target_sigaction *ka,
1318 target_siginfo_t *info, target_sigset_t *set,
1319 CPUARMState *env)
1320 {
1321 struct target_rt_sigframe *frame;
1322 abi_ulong frame_addr, return_addr;
1323
1324 frame_addr = get_sigframe(ka, env);
1325 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1326 goto give_sigsegv;
1327 }
1328
1329 __put_user(0, &frame->uc.tuc_flags);
1330 __put_user(0, &frame->uc.tuc_link);
1331
1332 __put_user(target_sigaltstack_used.ss_sp,
1333 &frame->uc.tuc_stack.ss_sp);
1334 __put_user(sas_ss_flags(env->xregs[31]),
1335 &frame->uc.tuc_stack.ss_flags);
1336 __put_user(target_sigaltstack_used.ss_size,
1337 &frame->uc.tuc_stack.ss_size);
1338 target_setup_sigframe(frame, env, set);
1339 if (ka->sa_flags & TARGET_SA_RESTORER) {
1340 return_addr = ka->sa_restorer;
1341 } else {
1342 /* mov x8,#__NR_rt_sigreturn; svc #0 */
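        /* Decoded: 0xd2801168 is "movz x8, #139" (139 is __NR_rt_sigreturn on
           AArch64) and 0xd4000001 is "svc #0". */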
1343 __put_user(0xd2801168, &frame->tramp[0]);
1344 __put_user(0xd4000001, &frame->tramp[1]);
1345 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1346 }
1347 env->xregs[0] = usig;
1348 env->xregs[31] = frame_addr;
1349 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1350 env->pc = ka->_sa_handler;
1351 env->xregs[30] = return_addr;
1352 if (info) {
1353 copy_siginfo_to_user(&frame->info, info);
1354 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1355 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1356 }
1357
1358 unlock_user_struct(frame, frame_addr, 1);
1359 return;
1360
1361 give_sigsegv:
1362 unlock_user_struct(frame, frame_addr, 1);
1363 force_sig(TARGET_SIGSEGV);
1364 }
1365
1366 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1367 target_siginfo_t *info, target_sigset_t *set,
1368 CPUARMState *env)
1369 {
1370 target_setup_frame(sig, ka, info, set, env);
1371 }
1372
1373 static void setup_frame(int sig, struct target_sigaction *ka,
1374 target_sigset_t *set, CPUARMState *env)
1375 {
1376 target_setup_frame(sig, ka, 0, set, env);
1377 }
1378
1379 long do_rt_sigreturn(CPUARMState *env)
1380 {
1381 struct target_rt_sigframe *frame = NULL;
1382 abi_ulong frame_addr = env->xregs[31];
1383
1384 if (frame_addr & 15) {
1385 goto badframe;
1386 }
1387
1388 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1389 goto badframe;
1390 }
1391
1392 if (target_restore_sigframe(env, frame)) {
1393 goto badframe;
1394 }
1395
1396 if (do_sigaltstack(frame_addr +
1397 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1398 0, get_sp_from_cpustate(env)) == -EFAULT) {
1399 goto badframe;
1400 }
1401
1402 unlock_user_struct(frame, frame_addr, 0);
1403 return env->xregs[0];
1404
1405 badframe:
1406 unlock_user_struct(frame, frame_addr, 0);
1407 force_sig(TARGET_SIGSEGV);
1408 return 0;
1409 }
1410
1411 long do_sigreturn(CPUARMState *env)
1412 {
1413 return do_rt_sigreturn(env);
1414 }
1415
1416 #elif defined(TARGET_ARM)
1417
1418 struct target_sigcontext {
1419 abi_ulong trap_no;
1420 abi_ulong error_code;
1421 abi_ulong oldmask;
1422 abi_ulong arm_r0;
1423 abi_ulong arm_r1;
1424 abi_ulong arm_r2;
1425 abi_ulong arm_r3;
1426 abi_ulong arm_r4;
1427 abi_ulong arm_r5;
1428 abi_ulong arm_r6;
1429 abi_ulong arm_r7;
1430 abi_ulong arm_r8;
1431 abi_ulong arm_r9;
1432 abi_ulong arm_r10;
1433 abi_ulong arm_fp;
1434 abi_ulong arm_ip;
1435 abi_ulong arm_sp;
1436 abi_ulong arm_lr;
1437 abi_ulong arm_pc;
1438 abi_ulong arm_cpsr;
1439 abi_ulong fault_address;
1440 };
1441
1442 struct target_ucontext_v1 {
1443 abi_ulong tuc_flags;
1444 abi_ulong tuc_link;
1445 target_stack_t tuc_stack;
1446 struct target_sigcontext tuc_mcontext;
1447 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1448 };
1449
1450 struct target_ucontext_v2 {
1451 abi_ulong tuc_flags;
1452 abi_ulong tuc_link;
1453 target_stack_t tuc_stack;
1454 struct target_sigcontext tuc_mcontext;
1455 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1456 char __unused[128 - sizeof(target_sigset_t)];
1457 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1458 };
1459
1460 struct target_user_vfp {
1461 uint64_t fpregs[32];
1462 abi_ulong fpscr;
1463 };
1464
1465 struct target_user_vfp_exc {
1466 abi_ulong fpexc;
1467 abi_ulong fpinst;
1468 abi_ulong fpinst2;
1469 };
1470
1471 struct target_vfp_sigframe {
1472 abi_ulong magic;
1473 abi_ulong size;
1474 struct target_user_vfp ufp;
1475 struct target_user_vfp_exc ufp_exc;
1476 } __attribute__((__aligned__(8)));
1477
1478 struct target_iwmmxt_sigframe {
1479 abi_ulong magic;
1480 abi_ulong size;
1481 uint64_t regs[16];
1482 /* Note that not all the coprocessor control registers are stored here */
1483 uint32_t wcssf;
1484 uint32_t wcasf;
1485 uint32_t wcgr0;
1486 uint32_t wcgr1;
1487 uint32_t wcgr2;
1488 uint32_t wcgr3;
1489 } __attribute__((__aligned__(8)));
1490
1491 #define TARGET_VFP_MAGIC 0x56465001
1492 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1493
1494 struct sigframe_v1
1495 {
1496 struct target_sigcontext sc;
1497 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1498 abi_ulong retcode;
1499 };
1500
1501 struct sigframe_v2
1502 {
1503 struct target_ucontext_v2 uc;
1504 abi_ulong retcode;
1505 };
1506
1507 struct rt_sigframe_v1
1508 {
1509 abi_ulong pinfo;
1510 abi_ulong puc;
1511 struct target_siginfo info;
1512 struct target_ucontext_v1 uc;
1513 abi_ulong retcode;
1514 };
1515
1516 struct rt_sigframe_v2
1517 {
1518 struct target_siginfo info;
1519 struct target_ucontext_v2 uc;
1520 abi_ulong retcode;
1521 };
1522
1523 #define TARGET_CONFIG_CPU_32 1
1524
1525 /*
1526 * For ARM syscalls, we encode the syscall number into the instruction.
1527 */
1528 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1529 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
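/* 0xef000000 is the ARM "swi" opcode; in the OABI convention used here the
   syscall number, offset by ARM_SYSCALL_BASE, is placed in its 24-bit
   immediate field. */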
1530
1531 /*
1532 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1533 * need two 16-bit instructions.
1534 */
1535 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1536 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
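/* The two halfwords packed into each word are "movs r7, #nr" (0x2700 | nr)
   and "svc 0" (0xdf00); setup_return() stores the pair as a single retcode
   word. */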
1537
1538 static const abi_ulong retcodes[4] = {
1539 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1540 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1541 };
1542
1543
1544 static inline int valid_user_regs(CPUARMState *regs)
1545 {
1546 return 1;
1547 }
1548
1549 static void
1550 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1551 CPUARMState *env, abi_ulong mask)
1552 {
1553 __put_user(env->regs[0], &sc->arm_r0);
1554 __put_user(env->regs[1], &sc->arm_r1);
1555 __put_user(env->regs[2], &sc->arm_r2);
1556 __put_user(env->regs[3], &sc->arm_r3);
1557 __put_user(env->regs[4], &sc->arm_r4);
1558 __put_user(env->regs[5], &sc->arm_r5);
1559 __put_user(env->regs[6], &sc->arm_r6);
1560 __put_user(env->regs[7], &sc->arm_r7);
1561 __put_user(env->regs[8], &sc->arm_r8);
1562 __put_user(env->regs[9], &sc->arm_r9);
1563 __put_user(env->regs[10], &sc->arm_r10);
1564 __put_user(env->regs[11], &sc->arm_fp);
1565 __put_user(env->regs[12], &sc->arm_ip);
1566 __put_user(env->regs[13], &sc->arm_sp);
1567 __put_user(env->regs[14], &sc->arm_lr);
1568 __put_user(env->regs[15], &sc->arm_pc);
1569 #ifdef TARGET_CONFIG_CPU_32
1570 __put_user(cpsr_read(env), &sc->arm_cpsr);
1571 #endif
1572
1573 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1574 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1575 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1576 __put_user(mask, &sc->oldmask);
1577 }
1578
1579 static inline abi_ulong
1580 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1581 {
1582 unsigned long sp = regs->regs[13];
1583
1584 /*
1585 * This is the X/Open sanctioned signal stack switching.
1586 */
1587 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp))
1588 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1589 /*
1590 * ATPCS B01 mandates 8-byte alignment
1591 */
1592 return (sp - framesize) & ~7;
1593 }
1594
1595 static void
1596 setup_return(CPUARMState *env, struct target_sigaction *ka,
1597 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1598 {
1599 abi_ulong handler = ka->_sa_handler;
1600 abi_ulong retcode;
1601 int thumb = handler & 1;
1602 uint32_t cpsr = cpsr_read(env);
1603
1604 cpsr &= ~CPSR_IT;
1605 if (thumb) {
1606 cpsr |= CPSR_T;
1607 } else {
1608 cpsr &= ~CPSR_T;
1609 }
1610
1611 if (ka->sa_flags & TARGET_SA_RESTORER) {
1612 retcode = ka->sa_restorer;
1613 } else {
1614 unsigned int idx = thumb;
1615
1616 if (ka->sa_flags & TARGET_SA_SIGINFO)
1617 idx += 2;
1618
1619 __put_user(retcodes[idx], rc);
1620
1621 retcode = rc_addr + thumb;
1622 }
1623
1624 env->regs[0] = usig;
1625 env->regs[13] = frame_addr;
1626 env->regs[14] = retcode;
1627 env->regs[15] = handler & (thumb ? ~1 : ~3);
1628 cpsr_write(env, cpsr, 0xffffffff);
1629 }
1630
1631 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1632 {
1633 int i;
1634 struct target_vfp_sigframe *vfpframe;
1635 vfpframe = (struct target_vfp_sigframe *)regspace;
1636 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1637 __put_user(sizeof(*vfpframe), &vfpframe->size);
1638 for (i = 0; i < 32; i++) {
1639 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1640 }
1641 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1642 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1643 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1644 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1645 return (abi_ulong*)(vfpframe+1);
1646 }
1647
1648 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1649 CPUARMState *env)
1650 {
1651 int i;
1652 struct target_iwmmxt_sigframe *iwmmxtframe;
1653 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1654 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1655 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1656 for (i = 0; i < 16; i++) {
1657 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1658 }
1659 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1660 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1661 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1662 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1663 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1664 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1665 return (abi_ulong*)(iwmmxtframe+1);
1666 }
1667
1668 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1669 target_sigset_t *set, CPUARMState *env)
1670 {
1671 struct target_sigaltstack stack;
1672 int i;
1673 abi_ulong *regspace;
1674
1675 /* Clear all the bits of the ucontext we don't use. */
1676 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1677
1678 memset(&stack, 0, sizeof(stack));
1679 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1680 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1681 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1682 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1683
1684 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1685 /* Save coprocessor signal frame. */
1686 regspace = uc->tuc_regspace;
1687 if (arm_feature(env, ARM_FEATURE_VFP)) {
1688 regspace = setup_sigframe_v2_vfp(regspace, env);
1689 }
1690 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1691 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1692 }
1693
1694 /* Write terminating magic word */
1695 __put_user(0, regspace);
1696
1697 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1698 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1699 }
1700 }
1701
1702 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1703 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1704 target_sigset_t *set, CPUARMState *regs)
1705 {
1706 struct sigframe_v1 *frame;
1707 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1708 int i;
1709
1710 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1711 return;
1712
1713 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1714
1715 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1716 __put_user(set->sig[i], &frame->extramask[i - 1]);
1717 }
1718
1719 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1720 frame_addr + offsetof(struct sigframe_v1, retcode));
1721
1722 unlock_user_struct(frame, frame_addr, 1);
1723 }
1724
1725 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1726 target_sigset_t *set, CPUARMState *regs)
1727 {
1728 struct sigframe_v2 *frame;
1729 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1730
1731 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1732 return;
1733
1734 setup_sigframe_v2(&frame->uc, set, regs);
1735
1736 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1737 frame_addr + offsetof(struct sigframe_v2, retcode));
1738
1739 unlock_user_struct(frame, frame_addr, 1);
1740 }
1741
1742 static void setup_frame(int usig, struct target_sigaction *ka,
1743 target_sigset_t *set, CPUARMState *regs)
1744 {
1745 if (get_osversion() >= 0x020612) {
1746 setup_frame_v2(usig, ka, set, regs);
1747 } else {
1748 setup_frame_v1(usig, ka, set, regs);
1749 }
1750 }
1751
1752 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1753 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1754 target_siginfo_t *info,
1755 target_sigset_t *set, CPUARMState *env)
1756 {
1757 struct rt_sigframe_v1 *frame;
1758 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1759 struct target_sigaltstack stack;
1760 int i;
1761 abi_ulong info_addr, uc_addr;
1762
1763 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1764 return /* 1 */;
1765
1766 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1767 __put_user(info_addr, &frame->pinfo);
1768 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1769 __put_user(uc_addr, &frame->puc);
1770 copy_siginfo_to_user(&frame->info, info);
1771
1772 /* Clear all the bits of the ucontext we don't use. */
1773 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1774
1775 memset(&stack, 0, sizeof(stack));
1776 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1777 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1778 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1779 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1780
1781 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1782 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1783 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1784 }
1785
1786 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1787 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1788
1789 env->regs[1] = info_addr;
1790 env->regs[2] = uc_addr;
1791
1792 unlock_user_struct(frame, frame_addr, 1);
1793 }
1794
1795 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1796 target_siginfo_t *info,
1797 target_sigset_t *set, CPUARMState *env)
1798 {
1799 struct rt_sigframe_v2 *frame;
1800 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1801 abi_ulong info_addr, uc_addr;
1802
1803 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1804 return /* 1 */;
1805
1806 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1807 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1808 copy_siginfo_to_user(&frame->info, info);
1809
1810 setup_sigframe_v2(&frame->uc, set, env);
1811
1812 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1813 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1814
1815 env->regs[1] = info_addr;
1816 env->regs[2] = uc_addr;
1817
1818 unlock_user_struct(frame, frame_addr, 1);
1819 }
1820
1821 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1822 target_siginfo_t *info,
1823 target_sigset_t *set, CPUARMState *env)
1824 {
1825 if (get_osversion() >= 0x020612) {
1826 setup_rt_frame_v2(usig, ka, info, set, env);
1827 } else {
1828 setup_rt_frame_v1(usig, ka, info, set, env);
1829 }
1830 }
1831
1832 static int
1833 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1834 {
1835 int err = 0;
1836 uint32_t cpsr;
1837
1838 __get_user(env->regs[0], &sc->arm_r0);
1839 __get_user(env->regs[1], &sc->arm_r1);
1840 __get_user(env->regs[2], &sc->arm_r2);
1841 __get_user(env->regs[3], &sc->arm_r3);
1842 __get_user(env->regs[4], &sc->arm_r4);
1843 __get_user(env->regs[5], &sc->arm_r5);
1844 __get_user(env->regs[6], &sc->arm_r6);
1845 __get_user(env->regs[7], &sc->arm_r7);
1846 __get_user(env->regs[8], &sc->arm_r8);
1847 __get_user(env->regs[9], &sc->arm_r9);
1848 __get_user(env->regs[10], &sc->arm_r10);
1849 __get_user(env->regs[11], &sc->arm_fp);
1850 __get_user(env->regs[12], &sc->arm_ip);
1851 __get_user(env->regs[13], &sc->arm_sp);
1852 __get_user(env->regs[14], &sc->arm_lr);
1853 __get_user(env->regs[15], &sc->arm_pc);
1854 #ifdef TARGET_CONFIG_CPU_32
1855 __get_user(cpsr, &sc->arm_cpsr);
1856 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC);
1857 #endif
1858
1859 err |= !valid_user_regs(env);
1860
1861 return err;
1862 }
1863
1864 static long do_sigreturn_v1(CPUARMState *env)
1865 {
1866 abi_ulong frame_addr;
1867 struct sigframe_v1 *frame = NULL;
1868 target_sigset_t set;
1869 sigset_t host_set;
1870 int i;
1871
1872 /*
1873 * Since we stacked the signal on a 64-bit boundary,
1874 * then 'sp' should be word aligned here. If it's
1875 * not, then the user is trying to mess with us.
1876 */
1877 frame_addr = env->regs[13];
1878 if (frame_addr & 7) {
1879 goto badframe;
1880 }
1881
1882 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1883 goto badframe;
1884
1885 __get_user(set.sig[0], &frame->sc.oldmask);
1886 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1887 __get_user(set.sig[i], &frame->extramask[i - 1]);
1888 }
1889
1890 target_to_host_sigset_internal(&host_set, &set);
1891 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1892
1893 if (restore_sigcontext(env, &frame->sc))
1894 goto badframe;
1895
1896 #if 0
1897 /* Send SIGTRAP if we're single-stepping */
1898 if (ptrace_cancel_bpt(current))
1899 send_sig(SIGTRAP, current, 1);
1900 #endif
1901 unlock_user_struct(frame, frame_addr, 0);
1902 return env->regs[0];
1903
1904 badframe:
1905 force_sig(TARGET_SIGSEGV /* , current */);
1906 return 0;
1907 }
1908
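/* Restore the VFP record of a v2 frame: check the magic/size header, then
 * reload the VFP registers, FPSCR and a sanitised FPEXC. Returns a pointer
 * just past the record, or 0 if the header does not match.
 */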
1909 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1910 {
1911 int i;
1912 abi_ulong magic, sz;
1913 uint32_t fpscr, fpexc;
1914 struct target_vfp_sigframe *vfpframe;
1915 vfpframe = (struct target_vfp_sigframe *)regspace;
1916
1917 __get_user(magic, &vfpframe->magic);
1918 __get_user(sz, &vfpframe->size);
1919 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1920 return 0;
1921 }
1922 for (i = 0; i < 32; i++) {
1923 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1924 }
1925 __get_user(fpscr, &vfpframe->ufp.fpscr);
1926 vfp_set_fpscr(env, fpscr);
1927 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1928 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1929 * and the exception flag is cleared
1930 */
1931 fpexc |= (1 << 30);
1932 fpexc &= ~((1 << 31) | (1 << 28));
1933 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1934 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1935 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1936 return (abi_ulong*)(vfpframe + 1);
1937 }
1938
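/* As above, but for the iWMMXt coprocessor record. */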
1939 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1940 abi_ulong *regspace)
1941 {
1942 int i;
1943 abi_ulong magic, sz;
1944 struct target_iwmmxt_sigframe *iwmmxtframe;
1945 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1946
1947 __get_user(magic, &iwmmxtframe->magic);
1948 __get_user(sz, &iwmmxtframe->size);
1949 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1950 return 0;
1951 }
1952 for (i = 0; i < 16; i++) {
1953 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1954 }
1955 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1956 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1957 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1958 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1959 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1960 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1961 return (abi_ulong*)(iwmmxtframe + 1);
1962 }
1963
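/* Common tail of sigreturn/rt_sigreturn for v2 frames: restore the signal
 * mask, the core registers, any VFP/iWMMXt coprocessor records and the
 * alternate signal stack. Returns non-zero if anything is malformed.
 */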
1964 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
1965 struct target_ucontext_v2 *uc)
1966 {
1967 sigset_t host_set;
1968 abi_ulong *regspace;
1969
1970 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1971 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1972
1973 if (restore_sigcontext(env, &uc->tuc_mcontext))
1974 return 1;
1975
1976 /* Restore coprocessor signal frame */
1977 regspace = uc->tuc_regspace;
1978 if (arm_feature(env, ARM_FEATURE_VFP)) {
1979 regspace = restore_sigframe_v2_vfp(env, regspace);
1980 if (!regspace) {
1981 return 1;
1982 }
1983 }
1984 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1985 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1986 if (!regspace) {
1987 return 1;
1988 }
1989 }
1990
1991 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
1992 return 1;
1993
1994 #if 0
1995 /* Send SIGTRAP if we're single-stepping */
1996 if (ptrace_cancel_bpt(current))
1997 send_sig(SIGTRAP, current, 1);
1998 #endif
1999
2000 return 0;
2001 }
2002
2003 static long do_sigreturn_v2(CPUARMState *env)
2004 {
2005 abi_ulong frame_addr;
2006 struct sigframe_v2 *frame = NULL;
2007
2008 /*
2009 * Since we stacked the signal on a 64-bit boundary,
2010 * then 'sp' should be word aligned here. If it's
2011 * not, then the user is trying to mess with us.
2012 */
2013 frame_addr = env->regs[13];
2014 if (frame_addr & 7) {
2015 goto badframe;
2016 }
2017
2018 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2019 goto badframe;
2020
2021 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2022 goto badframe;
2023
2024 unlock_user_struct(frame, frame_addr, 0);
2025 return env->regs[0];
2026
2027 badframe:
2028 unlock_user_struct(frame, frame_addr, 0);
2029 force_sig(TARGET_SIGSEGV /* , current */);
2030 return 0;
2031 }
2032
2033 long do_sigreturn(CPUARMState *env)
2034 {
2035 if (get_osversion() >= 0x020612) {
2036 return do_sigreturn_v2(env);
2037 } else {
2038 return do_sigreturn_v1(env);
2039 }
2040 }
2041
2042 static long do_rt_sigreturn_v1(CPUARMState *env)
2043 {
2044 abi_ulong frame_addr;
2045 struct rt_sigframe_v1 *frame = NULL;
2046 sigset_t host_set;
2047
2048 /*
2049 * Since we stacked the signal on a 64-bit boundary,
2050 * then 'sp' should be word aligned here. If it's
2051 * not, then the user is trying to mess with us.
2052 */
2053 frame_addr = env->regs[13];
2054 if (frame_addr & 7) {
2055 goto badframe;
2056 }
2057
2058 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2059 goto badframe;
2060
2061 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2062 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2063
2064 if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
2065 goto badframe;
2066
2067 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2068 goto badframe;
2069
2070 #if 0
2071 /* Send SIGTRAP if we're single-stepping */
2072 if (ptrace_cancel_bpt(current))
2073 send_sig(SIGTRAP, current, 1);
2074 #endif
2075 unlock_user_struct(frame, frame_addr, 0);
2076 return env->regs[0];
2077
2078 badframe:
2079 unlock_user_struct(frame, frame_addr, 0);
2080 force_sig(TARGET_SIGSEGV /* , current */);
2081 return 0;
2082 }
2083
2084 static long do_rt_sigreturn_v2(CPUARMState *env)
2085 {
2086 abi_ulong frame_addr;
2087 struct rt_sigframe_v2 *frame = NULL;
2088
2089 /*
2090 * Since we stacked the signal on a 64-bit boundary,
2091 * then 'sp' should be word aligned here. If it's
2092 * not, then the user is trying to mess with us.
2093 */
2094 frame_addr = env->regs[13];
2095 if (frame_addr & 7) {
2096 goto badframe;
2097 }
2098
2099 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2100 goto badframe;
2101
2102 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2103 goto badframe;
2104
2105 unlock_user_struct(frame, frame_addr, 0);
2106 return env->regs[0];
2107
2108 badframe:
2109 unlock_user_struct(frame, frame_addr, 0);
2110 force_sig(TARGET_SIGSEGV /* , current */);
2111 return 0;
2112 }
2113
2114 long do_rt_sigreturn(CPUARMState *env)
2115 {
2116 if (get_osversion() >= 0x020612) {
2117 return do_rt_sigreturn_v2(env);
2118 } else {
2119 return do_rt_sigreturn_v1(env);
2120 }
2121 }
2122
2123 #elif defined(TARGET_SPARC)
2124
2125 #define __SUNOS_MAXWIN 31
2126
2127 /* This is what SunOS does, so shall I. */
2128 struct target_sigcontext {
2129 abi_ulong sigc_onstack; /* state to restore */
2130
2131 abi_ulong sigc_mask; /* sigmask to restore */
2132 abi_ulong sigc_sp; /* stack pointer */
2133 abi_ulong sigc_pc; /* program counter */
2134 abi_ulong sigc_npc; /* next program counter */
2135 abi_ulong sigc_psr; /* for condition codes etc */
2136 abi_ulong sigc_g1; /* User uses these two registers */
2137 abi_ulong sigc_o0; /* within the trampoline code. */
2138
2139 /* Now comes information regarding the user's window set
2140 * at the time of the signal.
2141 */
2142 abi_ulong sigc_oswins; /* outstanding windows */
2143
2144 /* stack ptrs for each regwin buf */
2145 char *sigc_spbuf[__SUNOS_MAXWIN];
2146
2147 /* Windows to restore after signal */
2148 struct {
2149 abi_ulong locals[8];
2150 abi_ulong ins[8];
2151 } sigc_wbuf[__SUNOS_MAXWIN];
2152 };
2153 /* A Sparc stack frame */
2154 struct sparc_stackf {
2155 abi_ulong locals[8];
2156 abi_ulong ins[8];
2157 /* It's simpler to treat fp and callers_pc as elements of ins[]
2158 * since we never need to access them ourselves.
2159 */
2160 char *structptr;
2161 abi_ulong xargs[6];
2162 abi_ulong xxargs[1];
2163 };
2164
2165 typedef struct {
2166 struct {
2167 abi_ulong psr;
2168 abi_ulong pc;
2169 abi_ulong npc;
2170 abi_ulong y;
2171 abi_ulong u_regs[16]; /* globals and ins */
2172 } si_regs;
2173 int si_mask;
2174 } __siginfo_t;
2175
2176 typedef struct {
2177 abi_ulong si_float_regs[32];
2178 unsigned long si_fsr;
2179 unsigned long si_fpqdepth;
2180 struct {
2181 unsigned long *insn_addr;
2182 unsigned long insn;
2183 } si_fpqueue [16];
2184 } qemu_siginfo_fpu_t;
2185
2186
2187 struct target_signal_frame {
2188 struct sparc_stackf ss;
2189 __siginfo_t info;
2190 abi_ulong fpu_save;
2191 abi_ulong insns[2] __attribute__ ((aligned (8)));
2192 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2193 abi_ulong extra_size; /* Should be 0 */
2194 qemu_siginfo_fpu_t fpu_state;
2195 };
2196 struct target_rt_signal_frame {
2197 struct sparc_stackf ss;
2198 siginfo_t info;
2199 abi_ulong regs[20];
2200 sigset_t mask;
2201 abi_ulong fpu_save;
2202 unsigned int insns[2];
2203 stack_t stack;
2204 unsigned int extra_size; /* Should be 0 */
2205 qemu_siginfo_fpu_t fpu_state;
2206 };
2207
2208 #define UREG_O0 16
2209 #define UREG_O6 22
2210 #define UREG_I0 0
2211 #define UREG_I1 1
2212 #define UREG_I2 2
2213 #define UREG_I3 3
2214 #define UREG_I4 4
2215 #define UREG_I5 5
2216 #define UREG_I6 6
2217 #define UREG_I7 7
2218 #define UREG_L0 8
2219 #define UREG_FP UREG_I6
2220 #define UREG_SP UREG_O6
2221
2222 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2223 CPUSPARCState *env,
2224 unsigned long framesize)
2225 {
2226 abi_ulong sp;
2227
2228 sp = env->regwptr[UREG_FP];
2229
2230 /* This is the X/Open sanctioned signal stack switching. */
2231 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2232 if (!on_sig_stack(sp)
2233 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7))
2234 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2235 }
2236 return sp - framesize;
2237 }
2238
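/* Fill in the __siginfo_t register dump: %psr, %pc, %npc and %y, the global
 * registers in u_regs[0..7], the current window's in registers in
 * u_regs[8..15], and the old signal mask.
 */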
2239 static int
2240 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2241 {
2242 int err = 0, i;
2243
2244 __put_user(env->psr, &si->si_regs.psr);
2245 __put_user(env->pc, &si->si_regs.pc);
2246 __put_user(env->npc, &si->si_regs.npc);
2247 __put_user(env->y, &si->si_regs.y);
2248 for (i=0; i < 8; i++) {
2249 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2250 }
2251 for (i=0; i < 8; i++) {
2252 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2253 }
2254 __put_user(mask, &si->si_mask);
2255 return err;
2256 }
2257
2258 #if 0
2259 static int
2260 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2261 CPUSPARCState *env, unsigned long mask)
2262 {
2263 int err = 0;
2264
2265 __put_user(mask, &sc->sigc_mask);
2266 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2267 __put_user(env->pc, &sc->sigc_pc);
2268 __put_user(env->npc, &sc->sigc_npc);
2269 __put_user(env->psr, &sc->sigc_psr);
2270 __put_user(env->gregs[1], &sc->sigc_g1);
2271 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2272
2273 return err;
2274 }
2275 #endif
2276 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2277
2278 static void setup_frame(int sig, struct target_sigaction *ka,
2279 target_sigset_t *set, CPUSPARCState *env)
2280 {
2281 abi_ulong sf_addr;
2282 struct target_signal_frame *sf;
2283 int sigframe_size, err, i;
2284
2285 /* 1. Make sure everything is clean */
2286 //synchronize_user_stack();
2287
2288 sigframe_size = NF_ALIGNEDSZ;
2289 sf_addr = get_sigframe(ka, env, sigframe_size);
2290
2291 sf = lock_user(VERIFY_WRITE, sf_addr,
2292 sizeof(struct target_signal_frame), 0);
2293 if (!sf)
2294 goto sigsegv;
2295
2296 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2297 #if 0
2298 if (invalid_frame_pointer(sf, sigframe_size))
2299 goto sigill_and_return;
2300 #endif
2301 /* 2. Save the current process state */
2302 err = setup___siginfo(&sf->info, env, set->sig[0]);
2303 __put_user(0, &sf->extra_size);
2304
2305 //save_fpu_state(regs, &sf->fpu_state);
2306 //__put_user(&sf->fpu_state, &sf->fpu_save);
2307
2308 __put_user(set->sig[0], &sf->info.si_mask);
2309 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2310 __put_user(set->sig[i + 1], &sf->extramask[i]);
2311 }
2312
2313 for (i = 0; i < 8; i++) {
2314 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2315 }
2316 for (i = 0; i < 8; i++) {
2317 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2318 }
2319 if (err)
2320 goto sigsegv;
2321
2322 /* 3. signal handler back-trampoline and parameters */
2323 env->regwptr[UREG_FP] = sf_addr;
2324 env->regwptr[UREG_I0] = sig;
2325 env->regwptr[UREG_I1] = sf_addr +
2326 offsetof(struct target_signal_frame, info);
2327 env->regwptr[UREG_I2] = sf_addr +
2328 offsetof(struct target_signal_frame, info);
2329
2330 /* 4. signal handler */
2331 env->pc = ka->_sa_handler;
2332 env->npc = (env->pc + 4);
2333 /* 5. return to kernel instructions */
2334 if (ka->sa_restorer)
2335 env->regwptr[UREG_I7] = ka->sa_restorer;
2336 else {
2337 uint32_t val32;
2338
2339 env->regwptr[UREG_I7] = sf_addr +
2340 offsetof(struct target_signal_frame, insns) - 2 * 4;
2341
2342 /* mov __NR_sigreturn, %g1 */
2343 val32 = 0x821020d8;
2344 __put_user(val32, &sf->insns[0]);
2345
2346 /* t 0x10 */
2347 val32 = 0x91d02010;
2348 __put_user(val32, &sf->insns[1]);
2349 if (err)
2350 goto sigsegv;
2351
2352 /* Flush instruction space. */
2353 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2354 // tb_flush(env);
2355 }
2356 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2357 return;
2358 #if 0
2359 sigill_and_return:
2360 force_sig(TARGET_SIGILL);
2361 #endif
2362 sigsegv:
2363 //fprintf(stderr, "force_sig\n");
2364 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2365 force_sig(TARGET_SIGSEGV);
2366 }
2367
2368 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2369 target_siginfo_t *info,
2370 target_sigset_t *set, CPUSPARCState *env)
2371 {
2372 fprintf(stderr, "setup_rt_frame: not implemented\n");
2373 }
2374
2375 long do_sigreturn(CPUSPARCState *env)
2376 {
2377 abi_ulong sf_addr;
2378 struct target_signal_frame *sf;
2379 uint32_t up_psr, pc, npc;
2380 target_sigset_t set;
2381 sigset_t host_set;
2382 int err=0, i;
2383
2384 sf_addr = env->regwptr[UREG_FP];
2385 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1))
2386 goto segv_and_exit;
2387 #if 0
2388 fprintf(stderr, "sigreturn\n");
2389 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2390 #endif
2391 //cpu_dump_state(env, stderr, fprintf, 0);
2392
2393 /* 1. Make sure we are not getting garbage from the user */
2394
2395 if (sf_addr & 3)
2396 goto segv_and_exit;
2397
2398 __get_user(pc, &sf->info.si_regs.pc);
2399 __get_user(npc, &sf->info.si_regs.npc);
2400
2401 if ((pc | npc) & 3)
2402 goto segv_and_exit;
2403
2404 /* 2. Restore the state */
2405 __get_user(up_psr, &sf->info.si_regs.psr);
2406
2407 /* User can only change condition codes and FPU enabling in %psr. */
2408 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2409 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2410
2411 env->pc = pc;
2412 env->npc = npc;
2413 __get_user(env->y, &sf->info.si_regs.y);
2414 for (i=0; i < 8; i++) {
2415 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2416 }
2417 for (i=0; i < 8; i++) {
2418 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2419 }
2420
2421 /* FIXME: implement FPU save/restore:
2422 * __get_user(fpu_save, &sf->fpu_save);
2423 * if (fpu_save)
2424 * err |= restore_fpu_state(env, fpu_save);
2425 */
2426
2427 /* This is pretty much atomic, no amount of locking would prevent
2428 * the races which exist anyway.
2429 */
2430 __get_user(set.sig[0], &sf->info.si_mask);
2431 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2432 __get_user(set.sig[i], &sf->extramask[i - 1]);
2433 }
2434
2435 target_to_host_sigset_internal(&host_set, &set);
2436 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2437
2438 if (err)
2439 goto segv_and_exit;
2440 unlock_user_struct(sf, sf_addr, 0);
2441 return env->regwptr[0];
2442
2443 segv_and_exit:
2444 unlock_user_struct(sf, sf_addr, 0);
2445 force_sig(TARGET_SIGSEGV);
2446 }
2447
2448 long do_rt_sigreturn(CPUSPARCState *env)
2449 {
2450 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2451 return -TARGET_ENOSYS;
2452 }
2453
2454 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2455 #define MC_TSTATE 0
2456 #define MC_PC 1
2457 #define MC_NPC 2
2458 #define MC_Y 3
2459 #define MC_G1 4
2460 #define MC_G2 5
2461 #define MC_G3 6
2462 #define MC_G4 7
2463 #define MC_G5 8
2464 #define MC_G6 9
2465 #define MC_G7 10
2466 #define MC_O0 11
2467 #define MC_O1 12
2468 #define MC_O2 13
2469 #define MC_O3 14
2470 #define MC_O4 15
2471 #define MC_O5 16
2472 #define MC_O6 17
2473 #define MC_O7 18
2474 #define MC_NGREG 19
2475
2476 typedef abi_ulong target_mc_greg_t;
2477 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2478
2479 struct target_mc_fq {
2480 abi_ulong *mcfq_addr;
2481 uint32_t mcfq_insn;
2482 };
2483
2484 struct target_mc_fpu {
2485 union {
2486 uint32_t sregs[32];
2487 uint64_t dregs[32];
2488 //uint128_t qregs[16];
2489 } mcfpu_fregs;
2490 abi_ulong mcfpu_fsr;
2491 abi_ulong mcfpu_fprs;
2492 abi_ulong mcfpu_gsr;
2493 struct target_mc_fq *mcfpu_fq;
2494 unsigned char mcfpu_qcnt;
2495 unsigned char mcfpu_qentsz;
2496 unsigned char mcfpu_enab;
2497 };
2498 typedef struct target_mc_fpu target_mc_fpu_t;
2499
2500 typedef struct {
2501 target_mc_gregset_t mc_gregs;
2502 target_mc_greg_t mc_fp;
2503 target_mc_greg_t mc_i7;
2504 target_mc_fpu_t mc_fpregs;
2505 } target_mcontext_t;
2506
2507 struct target_ucontext {
2508 struct target_ucontext *tuc_link;
2509 abi_ulong tuc_flags;
2510 target_sigset_t tuc_sigmask;
2511 target_mcontext_t tuc_mcontext;
2512 };
2513
2514 /* A V9 register window */
2515 struct target_reg_window {
2516 abi_ulong locals[8];
2517 abi_ulong ins[8];
2518 };
2519
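/* On SPARC V9 the ABI biases %sp and %fp by 2047 bytes, so the register
 * window save area actually lives at sp + TARGET_STACK_BIAS.
 */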
2520 #define TARGET_STACK_BIAS 2047
2521
2522 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2523 void sparc64_set_context(CPUSPARCState *env)
2524 {
2525 abi_ulong ucp_addr;
2526 struct target_ucontext *ucp;
2527 target_mc_gregset_t *grp;
2528 abi_ulong pc, npc, tstate;
2529 abi_ulong fp, i7, w_addr;
2530 unsigned int i;
2531
2532 ucp_addr = env->regwptr[UREG_I0];
2533 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1))
2534 goto do_sigsegv;
2535 grp = &ucp->tuc_mcontext.mc_gregs;
2536 __get_user(pc, &((*grp)[MC_PC]));
2537 __get_user(npc, &((*grp)[MC_NPC]));
2538 if ((pc | npc) & 3)
2539 goto do_sigsegv;
2540 if (env->regwptr[UREG_I1]) {
2541 target_sigset_t target_set;
2542 sigset_t set;
2543
2544 if (TARGET_NSIG_WORDS == 1) {
2545 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2546 } else {
2547 abi_ulong *src, *dst;
2548 src = ucp->tuc_sigmask.sig;
2549 dst = target_set.sig;
2550 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2551 __get_user(*dst, src);
2552 }
2553 }
2554 target_to_host_sigset_internal(&set, &target_set);
2555 do_sigprocmask(SIG_SETMASK, &set, NULL);
2556 }
2557 env->pc = pc;
2558 env->npc = npc;
2559 __get_user(env->y, &((*grp)[MC_Y]));
2560 __get_user(tstate, &((*grp)[MC_TSTATE]));
2561 env->asi = (tstate >> 24) & 0xff;
2562 cpu_put_ccr(env, tstate >> 32);
2563 cpu_put_cwp64(env, tstate & 0x1f);
2564 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2565 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2566 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2567 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2568 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2569 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2570 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2571 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2572 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2573 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2574 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2575 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2576 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2577 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2578 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2579
2580 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2581 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2582
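/* Propagate the restored frame pointer and return address into the register
 * window save area on the (biased) user stack.
 */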
2583 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2584 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2585 abi_ulong) != 0)
2586 goto do_sigsegv;
2587 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2588 abi_ulong) != 0)
2589 goto do_sigsegv;
2590 /* FIXME this does not match how the kernel handles the FPU in
2591 * its sparc64_set_context implementation. In particular the FPU
2592 * is only restored if fenab is non-zero in:
2593 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2594 */
2595 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2596 {
2597 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2598 for (i = 0; i < 64; i++, src++) {
2599 if (i & 1) {
2600 __get_user(env->fpr[i/2].l.lower, src);
2601 } else {
2602 __get_user(env->fpr[i/2].l.upper, src);
2603 }
2604 }
2605 }
2606 __get_user(env->fsr,
2607 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2608 __get_user(env->gsr,
2609 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2610 unlock_user_struct(ucp, ucp_addr, 0);
2611 return;
2612 do_sigsegv:
2613 unlock_user_struct(ucp, ucp_addr, 0);
2614 force_sig(TARGET_SIGSEGV);
2615 }
2616
2617 void sparc64_get_context(CPUSPARCState *env)
2618 {
2619 abi_ulong ucp_addr;
2620 struct target_ucontext *ucp;
2621 target_mc_gregset_t *grp;
2622 target_mcontext_t *mcp;
2623 abi_ulong fp, i7, w_addr;
2624 int err;
2625 unsigned int i;
2626 target_sigset_t target_set;
2627 sigset_t set;
2628
2629 ucp_addr = env->regwptr[UREG_I0];
2630 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
2631 goto do_sigsegv;
2632
2633 mcp = &ucp->tuc_mcontext;
2634 grp = &mcp->mc_gregs;
2635
2636 /* Skip over the trap instruction, first. */
2637 env->pc = env->npc;
2638 env->npc += 4;
2639
2640 err = 0;
2641
2642 do_sigprocmask(0, NULL, &set);
2643 host_to_target_sigset_internal(&target_set, &set);
2644 if (TARGET_NSIG_WORDS == 1) {
2645 __put_user(target_set.sig[0],
2646 (abi_ulong *)&ucp->tuc_sigmask);
2647 } else {
2648 abi_ulong *src, *dst;
2649 src = target_set.sig;
2650 dst = ucp->tuc_sigmask.sig;
2651 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2652 __put_user(*src, dst);
2653 }
2654 if (err)
2655 goto do_sigsegv;
2656 }
2657
2658 /* XXX: tstate must be saved properly */
2659 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2660 __put_user(env->pc, &((*grp)[MC_PC]));
2661 __put_user(env->npc, &((*grp)[MC_NPC]));
2662 __put_user(env->y, &((*grp)[MC_Y]));
2663 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2664 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2665 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2666 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2667 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2668 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2669 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2670 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2671 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2672 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2673 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2674 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2675 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2676 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2677 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2678
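/* Fetch the caller's saved frame pointer and return address from the user
 * stack's register window save area so they can be recorded in mc_fp and
 * mc_i7.
 */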
2679 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2680 fp = i7 = 0;
2681 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2682 abi_ulong) != 0)
2683 goto do_sigsegv;
2684 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2685 abi_ulong) != 0)
2686 goto do_sigsegv;
2687 __put_user(fp, &(mcp->mc_fp));
2688 __put_user(i7, &(mcp->mc_i7));
2689
2690 {
2691 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2692 for (i = 0; i < 64; i++, dst++) {
2693 if (i & 1) {
2694 __put_user(env->fpr[i/2].l.lower, dst);
2695 } else {
2696 __put_user(env->fpr[i/2].l.upper, dst);
2697 }
2698 }
2699 }
2700 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2701 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2702 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2703
2704 if (err)
2705 goto do_sigsegv;
2706 unlock_user_struct(ucp, ucp_addr, 1);
2707 return;
2708 do_sigsegv:
2709 unlock_user_struct(ucp, ucp_addr, 1);
2710 force_sig(TARGET_SIGSEGV);
2711 }
2712 #endif
2713 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2714
2715 # if defined(TARGET_ABI_MIPSO32)
2716 struct target_sigcontext {
2717 uint32_t sc_regmask; /* Unused */
2718 uint32_t sc_status;
2719 uint64_t sc_pc;
2720 uint64_t sc_regs[32];
2721 uint64_t sc_fpregs[32];
2722 uint32_t sc_ownedfp; /* Unused */
2723 uint32_t sc_fpc_csr;
2724 uint32_t sc_fpc_eir; /* Unused */
2725 uint32_t sc_used_math;
2726 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2727 uint32_t pad0;
2728 uint64_t sc_mdhi;
2729 uint64_t sc_mdlo;
2730 target_ulong sc_hi1; /* Was sc_cause */
2731 target_ulong sc_lo1; /* Was sc_badvaddr */
2732 target_ulong sc_hi2; /* Was sc_sigset[4] */
2733 target_ulong sc_lo2;
2734 target_ulong sc_hi3;
2735 target_ulong sc_lo3;
2736 };
2737 # else /* N32 || N64 */
2738 struct target_sigcontext {
2739 uint64_t sc_regs[32];
2740 uint64_t sc_fpregs[32];
2741 uint64_t sc_mdhi;
2742 uint64_t sc_hi1;
2743 uint64_t sc_hi2;
2744 uint64_t sc_hi3;
2745 uint64_t sc_mdlo;
2746 uint64_t sc_lo1;
2747 uint64_t sc_lo2;
2748 uint64_t sc_lo3;
2749 uint64_t sc_pc;
2750 uint32_t sc_fpc_csr;
2751 uint32_t sc_used_math;
2752 uint32_t sc_dsp;
2753 uint32_t sc_reserved;
2754 };
2755 # endif /* O32 */
2756
2757 struct sigframe {
2758 uint32_t sf_ass[4]; /* argument save space for o32 */
2759 uint32_t sf_code[2]; /* signal trampoline */
2760 struct target_sigcontext sf_sc;
2761 target_sigset_t sf_mask;
2762 };
2763
2764 struct target_ucontext {
2765 target_ulong tuc_flags;
2766 target_ulong tuc_link;
2767 target_stack_t tuc_stack;
2768 target_ulong pad0;
2769 struct target_sigcontext tuc_mcontext;
2770 target_sigset_t tuc_sigmask;
2771 };
2772
2773 struct target_rt_sigframe {
2774 uint32_t rs_ass[4]; /* argument save space for o32 */
2775 uint32_t rs_code[2]; /* signal trampoline */
2776 struct target_siginfo rs_info;
2777 struct target_ucontext rs_uc;
2778 };
2779
2780 /* Install trampoline to jump back from signal handler */
2781 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2782 {
2783 int err = 0;
2784
2785 /*
2786 * Set up the return code ...
2787 *
2788 * li v0, __NR__foo_sigreturn
2789 * syscall
2790 */
2791
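/* 0x24020000 | nr encodes "addiu $v0, $zero, nr" (i.e. li v0, nr);
 * 0x0000000c is the MIPS SYSCALL instruction. */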
2792 __put_user(0x24020000 + syscall, tramp + 0);
2793 __put_user(0x0000000c , tramp + 1);
2794 return err;
2795 }
2796
2797 static inline void setup_sigcontext(CPUMIPSState *regs,
2798 struct target_sigcontext *sc)
2799 {
2800 int i;
2801
2802 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2803 regs->hflags &= ~MIPS_HFLAG_BMASK;
2804
2805 __put_user(0, &sc->sc_regs[0]);
2806 for (i = 1; i < 32; ++i) {
2807 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2808 }
2809
2810 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2811 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2812
2813 /* Rather than checking for dsp existence, always copy. The storage
2814 would just be garbage otherwise. */
2815 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2816 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2817 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2818 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2819 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2820 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2821 {
2822 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2823 __put_user(dsp, &sc->sc_dsp);
2824 }
2825
2826 __put_user(1, &sc->sc_used_math);
2827
2828 for (i = 0; i < 32; ++i) {
2829 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2830 }
2831 }
2832
2833 static inline void
2834 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2835 {
2836 int i;
2837
2838 __get_user(regs->CP0_EPC, &sc->sc_pc);
2839
2840 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2841 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2842
2843 for (i = 1; i < 32; ++i) {
2844 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2845 }
2846
2847 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2848 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2849 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2850 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2851 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2852 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2853 {
2854 uint32_t dsp;
2855 __get_user(dsp, &sc->sc_dsp);
2856 cpu_wrdsp(dsp, 0x3ff, regs);
2857 }
2858
2859 for (i = 0; i < 32; ++i) {
2860 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2861 }
2862 }
2863
2864 /*
2865 * Determine which stack to use..
2866 */
2867 static inline abi_ulong
2868 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2869 {
2870 unsigned long sp;
2871
2872 /* Default to using normal stack */
2873 sp = regs->active_tc.gpr[29];
2874
2875 /*
2876 * FPU emulator may have its own trampoline active just
2877 * above the user stack, 16-bytes before the next lowest
2878 * 16 byte boundary. Try to avoid trashing it.
2879 */
2880 sp -= 32;
2881
2882 /* This is the X/Open sanctioned signal stack switching. */
2883 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2884 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2885 }
2886
2887 return (sp - frame_size) & ~7;
2888 }
2889
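/* On MIPS16/microMIPS capable CPUs bit 0 of the PC selects the compressed
 * ISA mode; move it into hflags and clear it from the architectural PC.
 */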
2890 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2891 {
2892 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2893 env->hflags &= ~MIPS_HFLAG_M16;
2894 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2895 env->active_tc.PC &= ~(target_ulong) 1;
2896 }
2897 }
2898
2899 # if defined(TARGET_ABI_MIPSO32)
2900 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2901 static void setup_frame(int sig, struct target_sigaction * ka,
2902 target_sigset_t *set, CPUMIPSState *regs)
2903 {
2904 struct sigframe *frame;
2905 abi_ulong frame_addr;
2906 int i;
2907
2908 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2909 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
2910 goto give_sigsegv;
2911
2912 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2913
2914 setup_sigcontext(regs, &frame->sf_sc);
2915
2916 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2917 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2918 }
2919
2920 /*
2921 * Arguments to signal handler:
2922 *
2923 * a0 = signal number
2924 * a1 = 0 (should be cause)
2925 * a2 = pointer to struct sigcontext
2926 *
2927 * $25 and PC point to the signal handler, $29 points to the
2928 * struct sigframe.
2929 */
2930 regs->active_tc.gpr[ 4] = sig;
2931 regs->active_tc.gpr[ 5] = 0;
2932 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2933 regs->active_tc.gpr[29] = frame_addr;
2934 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2935 /* The original kernel code sets CP0_EPC to the handler,
2936 * since it returns to userland using eret;
2937 * we cannot do this here, and we must set PC directly */
2938 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2939 mips_set_hflags_isa_mode_from_pc(regs);
2940 unlock_user_struct(frame, frame_addr, 1);
2941 return;
2942
2943 give_sigsegv:
2944 force_sig(TARGET_SIGSEGV/*, current*/);
2945 }
2946
2947 long do_sigreturn(CPUMIPSState *regs)
2948 {
2949 struct sigframe *frame;
2950 abi_ulong frame_addr;
2951 sigset_t blocked;
2952 target_sigset_t target_set;
2953 int i;
2954
2955 #if defined(DEBUG_SIGNAL)
2956 fprintf(stderr, "do_sigreturn\n");
2957 #endif
2958 frame_addr = regs->active_tc.gpr[29];
2959 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2960 goto badframe;
2961
2962 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2963 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2964 }
2965
2966 target_to_host_sigset_internal(&blocked, &target_set);
2967 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
2968
2969 restore_sigcontext(regs, &frame->sf_sc);
2970
2971 #if 0
2972 /*
2973 * Don't let your children do this ...
2974 */
2975 __asm__ __volatile__(
2976 "move\t$29, %0\n\t"
2977 "j\tsyscall_exit"
2978 :/* no outputs */
2979 :"r" (&regs));
2980 /* Unreached */
2981 #endif
2982
2983 regs->active_tc.PC = regs->CP0_EPC;
2984 mips_set_hflags_isa_mode_from_pc(regs);
2985 /* I am not sure this is right, but it seems to work;
2986 * maybe a problem with nested signals? */
2987 regs->CP0_EPC = 0;
2988 return -TARGET_QEMU_ESIGRETURN;
2989
2990 badframe:
2991 force_sig(TARGET_SIGSEGV/*, current*/);
2992 return 0;
2993 }
2994 # endif /* O32 */
2995
2996 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2997 target_siginfo_t *info,
2998 target_sigset_t *set, CPUMIPSState *env)
2999 {
3000 struct target_rt_sigframe *frame;
3001 abi_ulong frame_addr;
3002 int i;
3003
3004 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3005 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3006 goto give_sigsegv;
3007
3008 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3009
3010 copy_siginfo_to_user(&frame->rs_info, info);
3011
3012 __put_user(0, &frame->rs_uc.tuc_flags);
3013 __put_user(0, &frame->rs_uc.tuc_link);
3014 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3015 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3016 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3017 &frame->rs_uc.tuc_stack.ss_flags);
3018
3019 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3020
3021 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3022 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3023 }
3024
3025 /*
3026 * Arguments to signal handler:
3027 *
3028 * a0 = signal number
3029 * a1 = pointer to siginfo_t
3030 * a2 = pointer to struct ucontext
3031 *
3032 * $25 and PC point to the signal handler, $29 points to the
3033 * struct sigframe.
3034 */
3035 env->active_tc.gpr[ 4] = sig;
3036 env->active_tc.gpr[ 5] = frame_addr
3037 + offsetof(struct target_rt_sigframe, rs_info);
3038 env->active_tc.gpr[ 6] = frame_addr
3039 + offsetof(struct target_rt_sigframe, rs_uc);
3040 env->active_tc.gpr[29] = frame_addr;
3041 env->active_tc.gpr[31] = frame_addr
3042 + offsetof(struct target_rt_sigframe, rs_code);
3043 /* The original kernel code sets CP0_EPC to the handler,
3044 * since it returns to userland using eret;
3045 * we cannot do this here, and we must set PC directly */
3046 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3047 mips_set_hflags_isa_mode_from_pc(env);
3048 unlock_user_struct(frame, frame_addr, 1);
3049 return;
3050
3051 give_sigsegv:
3052 unlock_user_struct(frame, frame_addr, 1);
3053 force_sig(TARGET_SIGSEGV/*, current*/);
3054 }
3055
3056 long do_rt_sigreturn(CPUMIPSState *env)
3057 {
3058 struct target_rt_sigframe *frame;
3059 abi_ulong frame_addr;
3060 sigset_t blocked;
3061
3062 #if defined(DEBUG_SIGNAL)
3063 fprintf(stderr, "do_rt_sigreturn\n");
3064 #endif
3065 frame_addr = env->active_tc.gpr[29];
3066 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3067 goto badframe;
3068
3069 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3070 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3071
3072 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3073
3074 if (do_sigaltstack(frame_addr +
3075 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3076 0, get_sp_from_cpustate(env)) == -EFAULT)
3077 goto badframe;
3078
3079 env->active_tc.PC = env->CP0_EPC;
3080 mips_set_hflags_isa_mode_from_pc(env);
3081 /* I am not sure this is right, but it seems to work;
3082 * maybe a problem with nested signals? */
3083 env->CP0_EPC = 0;
3084 return -TARGET_QEMU_ESIGRETURN;
3085
3086 badframe:
3087 force_sig(TARGET_SIGSEGV/*, current*/);
3088 return 0;
3089 }
3090
3091 #elif defined(TARGET_SH4)
3092
3093 /*
3094 * code and data structures from linux kernel:
3095 * include/asm-sh/sigcontext.h
3096 * arch/sh/kernel/signal.c
3097 */
3098
3099 struct target_sigcontext {
3100 target_ulong oldmask;
3101
3102 /* CPU registers */
3103 target_ulong sc_gregs[16];
3104 target_ulong sc_pc;
3105 target_ulong sc_pr;
3106 target_ulong sc_sr;
3107 target_ulong sc_gbr;
3108 target_ulong sc_mach;
3109 target_ulong sc_macl;
3110
3111 /* FPU registers */
3112 target_ulong sc_fpregs[16];
3113 target_ulong sc_xfpregs[16];
3114 unsigned int sc_fpscr;
3115 unsigned int sc_fpul;
3116 unsigned int sc_ownedfp;
3117 };
3118
3119 struct target_sigframe
3120 {
3121 struct target_sigcontext sc;
3122 target_ulong extramask[TARGET_NSIG_WORDS-1];
3123 uint16_t retcode[3];
3124 };
3125
3126
3127 struct target_ucontext {
3128 target_ulong tuc_flags;
3129 struct target_ucontext *tuc_link;
3130 target_stack_t tuc_stack;
3131 struct target_sigcontext tuc_mcontext;
3132 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3133 };
3134
3135 struct target_rt_sigframe
3136 {
3137 struct target_siginfo info;
3138 struct target_ucontext uc;
3139 uint16_t retcode[3];
3140 };
3141
3142
3143 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3144 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3145
3146 static abi_ulong get_sigframe(struct target_sigaction *ka,
3147 unsigned long sp, size_t frame_size)
3148 {
3149 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3150 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3151 }
3152
3153 return (sp - frame_size) & -8ul;
3154 }
3155
3156 static void setup_sigcontext(struct target_sigcontext *sc,
3157 CPUSH4State *regs, unsigned long mask)
3158 {
3159 int i;
3160
3161 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3162 COPY(gregs[0]); COPY(gregs[1]);
3163 COPY(gregs[2]); COPY(gregs[3]);
3164 COPY(gregs[4]); COPY(gregs[5]);
3165 COPY(gregs[6]); COPY(gregs[7]);
3166 COPY(gregs[8]); COPY(gregs[9]);
3167 COPY(gregs[10]); COPY(gregs[11]);
3168 COPY(gregs[12]); COPY(gregs[13]);
3169 COPY(gregs[14]); COPY(gregs[15]);
3170 COPY(gbr); COPY(mach);
3171 COPY(macl); COPY(pr);
3172 COPY(sr); COPY(pc);
3173 #undef COPY
3174
3175 for (i=0; i<16; i++) {
3176 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3177 }
3178 __put_user(regs->fpscr, &sc->sc_fpscr);
3179 __put_user(regs->fpul, &sc->sc_fpul);
3180
3181 /* non-iBCS2 extensions.. */
3182 __put_user(mask, &sc->oldmask);
3183 }
3184
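/* Counterpart of setup_sigcontext(): reload r1-r15, the control and FPU
 * registers directly; r0 is handed back separately through r0_p, and tra is
 * set to -1 to disable the syscall checks.
 */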
3185 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc,
3186 target_ulong *r0_p)
3187 {
3188 int i;
3189
3190 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3191 COPY(gregs[1]);
3192 COPY(gregs[2]); COPY(gregs[3]);
3193 COPY(gregs[4]); COPY(gregs[5]);
3194 COPY(gregs[6]); COPY(gregs[7]);
3195 COPY(gregs[8]); COPY(gregs[9]);
3196 COPY(gregs[10]); COPY(gregs[11]);
3197 COPY(gregs[12]); COPY(gregs[13]);
3198 COPY(gregs[14]); COPY(gregs[15]);
3199 COPY(gbr); COPY(mach);
3200 COPY(macl); COPY(pr);
3201 COPY(sr); COPY(pc);
3202 #undef COPY
3203
3204 for (i=0; i<16; i++) {
3205 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3206 }
3207 __get_user(regs->fpscr, &sc->sc_fpscr);
3208 __get_user(regs->fpul, &sc->sc_fpul);
3209
3210 regs->tra = -1; /* disable syscall checks */
3211 __get_user(*r0_p, &sc->sc_gregs[0]);
3212 }
3213
3214 static void setup_frame(int sig, struct target_sigaction *ka,
3215 target_sigset_t *set, CPUSH4State *regs)
3216 {
3217 struct target_sigframe *frame;
3218 abi_ulong frame_addr;
3219 int i;
3220 int err = 0;
3221 int signal;
3222
3223 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3224 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3225 goto give_sigsegv;
3226
3227 signal = current_exec_domain_sig(sig);
3228
3229 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3230
3231 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3232 __put_user(set->sig[i + 1], &frame->extramask[i]);
3233 }
3234
3235 /* Set up to return from userspace. If provided, use a stub
3236 already in userspace. */
3237 if (ka->sa_flags & TARGET_SA_RESTORER) {