linux-user/signal.c: Correct error path for AArch64 do_rt_sigreturn
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <stdarg.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <assert.h>
26 #include <sys/ucontext.h>
27 #include <sys/resource.h>
28
29 #include "qemu.h"
30 #include "qemu-common.h"
31 #include "target_signal.h"
32
33 //#define DEBUG_SIGNAL
34
35 static struct target_sigaltstack target_sigaltstack_used = {
36 .ss_sp = 0,
37 .ss_size = 0,
38 .ss_flags = TARGET_SS_DISABLE,
39 };
40
41 static struct target_sigaction sigact_table[TARGET_NSIG];
42
43 static void host_signal_handler(int host_signum, siginfo_t *info,
44 void *puc);
45
46 static uint8_t host_to_target_signal_table[_NSIG] = {
47 [SIGHUP] = TARGET_SIGHUP,
48 [SIGINT] = TARGET_SIGINT,
49 [SIGQUIT] = TARGET_SIGQUIT,
50 [SIGILL] = TARGET_SIGILL,
51 [SIGTRAP] = TARGET_SIGTRAP,
52 [SIGABRT] = TARGET_SIGABRT,
53 /* [SIGIOT] = TARGET_SIGIOT,*/
54 [SIGBUS] = TARGET_SIGBUS,
55 [SIGFPE] = TARGET_SIGFPE,
56 [SIGKILL] = TARGET_SIGKILL,
57 [SIGUSR1] = TARGET_SIGUSR1,
58 [SIGSEGV] = TARGET_SIGSEGV,
59 [SIGUSR2] = TARGET_SIGUSR2,
60 [SIGPIPE] = TARGET_SIGPIPE,
61 [SIGALRM] = TARGET_SIGALRM,
62 [SIGTERM] = TARGET_SIGTERM,
63 #ifdef SIGSTKFLT
64 [SIGSTKFLT] = TARGET_SIGSTKFLT,
65 #endif
66 [SIGCHLD] = TARGET_SIGCHLD,
67 [SIGCONT] = TARGET_SIGCONT,
68 [SIGSTOP] = TARGET_SIGSTOP,
69 [SIGTSTP] = TARGET_SIGTSTP,
70 [SIGTTIN] = TARGET_SIGTTIN,
71 [SIGTTOU] = TARGET_SIGTTOU,
72 [SIGURG] = TARGET_SIGURG,
73 [SIGXCPU] = TARGET_SIGXCPU,
74 [SIGXFSZ] = TARGET_SIGXFSZ,
75 [SIGVTALRM] = TARGET_SIGVTALRM,
76 [SIGPROF] = TARGET_SIGPROF,
77 [SIGWINCH] = TARGET_SIGWINCH,
78 [SIGIO] = TARGET_SIGIO,
79 [SIGPWR] = TARGET_SIGPWR,
80 [SIGSYS] = TARGET_SIGSYS,
81 /* next signals stay the same */
82 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
83 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
84 To fix this properly we need to do manual signal delivery multiplexed
85 over a single host signal. */
86 [__SIGRTMIN] = __SIGRTMAX,
87 [__SIGRTMAX] = __SIGRTMIN,
88 };
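/* For illustration (assuming a typical glibc host where __SIGRTMIN == 32 and
 * __SIGRTMAX == 64), once signal_init() has filled in the identity entries:
 *   host_to_target_signal(32) -> 64
 *   host_to_target_signal(64) -> 32
 *   host_to_target_signal(40) -> 40
 */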
89 static uint8_t target_to_host_signal_table[_NSIG];
90
91 static inline int on_sig_stack(unsigned long sp)
92 {
93 return (sp - target_sigaltstack_used.ss_sp
94 < target_sigaltstack_used.ss_size);
95 }
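/* Because the operands are unsigned, the single comparison above is
 * equivalent to (sp >= ss_sp && sp - ss_sp < ss_size): when sp is below
 * ss_sp the subtraction wraps to a huge value and the test fails. */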
96
97 static inline int sas_ss_flags(unsigned long sp)
98 {
99 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
100 : on_sig_stack(sp) ? SS_ONSTACK : 0);
101 }
102
103 int host_to_target_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return host_to_target_signal_table[sig];
108 }
109
110 int target_to_host_signal(int sig)
111 {
112 if (sig < 0 || sig >= _NSIG)
113 return sig;
114 return target_to_host_signal_table[sig];
115 }
116
117 static inline void target_sigemptyset(target_sigset_t *set)
118 {
119 memset(set, 0, sizeof(*set));
120 }
121
122 static inline void target_sigaddset(target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 set->sig[signum / TARGET_NSIG_BPW] |= mask;
127 }
128
129 static inline int target_sigismember(const target_sigset_t *set, int signum)
130 {
131 signum--;
132 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
133 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
134 }
135
136 static void host_to_target_sigset_internal(target_sigset_t *d,
137 const sigset_t *s)
138 {
139 int i;
140 target_sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (sigismember(s, i)) {
143 target_sigaddset(d, host_to_target_signal(i));
144 }
145 }
146 }
147
148 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
149 {
150 target_sigset_t d1;
151 int i;
152
153 host_to_target_sigset_internal(&d1, s);
154 for(i = 0;i < TARGET_NSIG_WORDS; i++)
155 d->sig[i] = tswapal(d1.sig[i]);
156 }
157
158 static void target_to_host_sigset_internal(sigset_t *d,
159 const target_sigset_t *s)
160 {
161 int i;
162 sigemptyset(d);
163 for (i = 1; i <= TARGET_NSIG; i++) {
164 if (target_sigismember(s, i)) {
165 sigaddset(d, target_to_host_signal(i));
166 }
167 }
168 }
169
170 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
171 {
172 target_sigset_t s1;
173 int i;
174
175 for(i = 0;i < TARGET_NSIG_WORDS; i++)
176 s1.sig[i] = tswapal(s->sig[i]);
177 target_to_host_sigset_internal(d, &s1);
178 }
179
180 void host_to_target_old_sigset(abi_ulong *old_sigset,
181 const sigset_t *sigset)
182 {
183 target_sigset_t d;
184 host_to_target_sigset(&d, sigset);
185 *old_sigset = d.sig[0];
186 }
187
188 void target_to_host_old_sigset(sigset_t *sigset,
189 const abi_ulong *old_sigset)
190 {
191 target_sigset_t d;
192 int i;
193
194 d.sig[0] = *old_sigset;
195 for(i = 1;i < TARGET_NSIG_WORDS; i++)
196 d.sig[i] = 0;
197 target_to_host_sigset(sigset, &d);
198 }
199
200 /* siginfo conversion */
201
202 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
203 const siginfo_t *info)
204 {
205 int sig = host_to_target_signal(info->si_signo);
206 tinfo->si_signo = sig;
207 tinfo->si_errno = 0;
208 tinfo->si_code = info->si_code;
209
210 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
211 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
212 /* Should never come here, but who knows. The information for
213 the target is irrelevant. */
214 tinfo->_sifields._sigfault._addr = 0;
215 } else if (sig == TARGET_SIGIO) {
216 tinfo->_sifields._sigpoll._band = info->si_band;
217 tinfo->_sifields._sigpoll._fd = info->si_fd;
218 } else if (sig == TARGET_SIGCHLD) {
219 tinfo->_sifields._sigchld._pid = info->si_pid;
220 tinfo->_sifields._sigchld._uid = info->si_uid;
221 tinfo->_sifields._sigchld._status
222 = host_to_target_waitstatus(info->si_status);
223 tinfo->_sifields._sigchld._utime = info->si_utime;
224 tinfo->_sifields._sigchld._stime = info->si_stime;
225 } else if (sig >= TARGET_SIGRTMIN) {
226 tinfo->_sifields._rt._pid = info->si_pid;
227 tinfo->_sifields._rt._uid = info->si_uid;
228 /* XXX: potential problem if 64 bit */
229 tinfo->_sifields._rt._sigval.sival_ptr
230 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
231 }
232 }
233
234 static void tswap_siginfo(target_siginfo_t *tinfo,
235 const target_siginfo_t *info)
236 {
237 int sig = info->si_signo;
238 tinfo->si_signo = tswap32(sig);
239 tinfo->si_errno = tswap32(info->si_errno);
240 tinfo->si_code = tswap32(info->si_code);
241
242 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
243 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
244 tinfo->_sifields._sigfault._addr
245 = tswapal(info->_sifields._sigfault._addr);
246 } else if (sig == TARGET_SIGIO) {
247 tinfo->_sifields._sigpoll._band
248 = tswap32(info->_sifields._sigpoll._band);
249 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
250 } else if (sig == TARGET_SIGCHLD) {
251 tinfo->_sifields._sigchld._pid
252 = tswap32(info->_sifields._sigchld._pid);
253 tinfo->_sifields._sigchld._uid
254 = tswap32(info->_sifields._sigchld._uid);
255 tinfo->_sifields._sigchld._status
256 = tswap32(info->_sifields._sigchld._status);
257 tinfo->_sifields._sigchld._utime
258 = tswapal(info->_sifields._sigchld._utime);
259 tinfo->_sifields._sigchld._stime
260 = tswapal(info->_sifields._sigchld._stime);
261 } else if (sig >= TARGET_SIGRTMIN) {
262 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
263 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
264 tinfo->_sifields._rt._sigval.sival_ptr
265 = tswapal(info->_sifields._rt._sigval.sival_ptr);
266 }
267 }
268
269
270 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
271 {
272 host_to_target_siginfo_noswap(tinfo, info);
273 tswap_siginfo(tinfo, tinfo);
274 }
275
276 /* XXX: we assume only POSIX RT signals are used. */
277 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
278 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
279 {
280 info->si_signo = tswap32(tinfo->si_signo);
281 info->si_errno = tswap32(tinfo->si_errno);
282 info->si_code = tswap32(tinfo->si_code);
283 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
284 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
285 info->si_value.sival_ptr =
286 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
287 }
288
289 static int fatal_signal (int sig)
290 {
291 switch (sig) {
292 case TARGET_SIGCHLD:
293 case TARGET_SIGURG:
294 case TARGET_SIGWINCH:
295 /* Ignored by default. */
296 return 0;
297 case TARGET_SIGCONT:
298 case TARGET_SIGSTOP:
299 case TARGET_SIGTSTP:
300 case TARGET_SIGTTIN:
301 case TARGET_SIGTTOU:
302 /* Job control signals. */
303 return 0;
304 default:
305 return 1;
306 }
307 }
308
309 /* returns 1 if given signal should dump core if not handled */
310 static int core_dump_signal(int sig)
311 {
312 switch (sig) {
313 case TARGET_SIGABRT:
314 case TARGET_SIGFPE:
315 case TARGET_SIGILL:
316 case TARGET_SIGQUIT:
317 case TARGET_SIGSEGV:
318 case TARGET_SIGTRAP:
319 case TARGET_SIGBUS:
320 return (1);
321 default:
322 return (0);
323 }
324 }
325
326 void signal_init(void)
327 {
328 struct sigaction act;
329 struct sigaction oact;
330 int i, j;
331 int host_sig;
332
333 /* generate signal conversion tables */
334 for(i = 1; i < _NSIG; i++) {
335 if (host_to_target_signal_table[i] == 0)
336 host_to_target_signal_table[i] = i;
337 }
338 for(i = 1; i < _NSIG; i++) {
339 j = host_to_target_signal_table[i];
340 target_to_host_signal_table[j] = i;
341 }
342
343 /* set all host signal handlers. ALL signals are blocked during
344 the handlers to serialize them. */
345 memset(sigact_table, 0, sizeof(sigact_table));
346
347 sigfillset(&act.sa_mask);
348 act.sa_flags = SA_SIGINFO;
349 act.sa_sigaction = host_signal_handler;
350 for(i = 1; i <= TARGET_NSIG; i++) {
351 host_sig = target_to_host_signal(i);
352 sigaction(host_sig, NULL, &oact);
353 if (oact.sa_sigaction == (void *)SIG_IGN) {
354 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
355 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
356 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
357 }
358 /* If there's already a handler installed then something has
359 gone horribly wrong, so don't even try to handle that case. */
360 /* Install some handlers for our own use. We need at least
361 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
362 trap all signals because it affects syscall interrupt
363 behavior. But do trap all default-fatal signals. */
364 if (fatal_signal (i))
365 sigaction(host_sig, &act, NULL);
366 }
367 }
368
369 /* signal queue handling */
370
371 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
372 {
373 CPUState *cpu = ENV_GET_CPU(env);
374 TaskState *ts = cpu->opaque;
375 struct sigqueue *q = ts->first_free;
376 if (!q)
377 return NULL;
378 ts->first_free = q->next;
379 return q;
380 }
381
382 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
383 {
384 CPUState *cpu = ENV_GET_CPU(env);
385 TaskState *ts = cpu->opaque;
386
387 q->next = ts->first_free;
388 ts->first_free = q;
389 }
390
391 /* abort execution with signal */
392 static void QEMU_NORETURN force_sig(int target_sig)
393 {
394 CPUState *cpu = thread_cpu;
395 CPUArchState *env = cpu->env_ptr;
396 TaskState *ts = (TaskState *)cpu->opaque;
397 int host_sig, core_dumped = 0;
398 struct sigaction act;
399 host_sig = target_to_host_signal(target_sig);
400 gdb_signalled(env, target_sig);
401
402 /* dump core if supported by target binary format */
403 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
404 stop_all_tasks();
405 core_dumped =
406 ((*ts->bprm->core_dump)(target_sig, env) == 0);
407 }
408 if (core_dumped) {
409 /* we already dumped the core of the target process, we don't want
410 * a coredump of qemu itself */
411 struct rlimit nodump;
412 getrlimit(RLIMIT_CORE, &nodump);
413 nodump.rlim_cur=0;
414 setrlimit(RLIMIT_CORE, &nodump);
415 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
416 target_sig, strsignal(host_sig), "core dumped" );
417 }
418
419 /* The proper exit code for dying from an uncaught signal is
420 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
421 * a negative value. To get the proper exit code we need to
422 * actually die from an uncaught signal. Here the default signal
424 * handler is installed, we send ourselves a signal and we wait for
424 * it to arrive. */
425 sigfillset(&act.sa_mask);
426 act.sa_handler = SIG_DFL;
427 act.sa_flags = 0;
428 sigaction(host_sig, &act, NULL);
429
430 /* For some reason raise(host_sig) doesn't send the signal when
431 * statically linked on x86-64. */
432 kill(getpid(), host_sig);
433
434 /* Make sure the signal isn't masked (just reuse the mask inside
435 of act) */
436 sigdelset(&act.sa_mask, host_sig);
437 sigsuspend(&act.sa_mask);
438
439 /* unreachable */
440 abort();
441 }
442
443 /* queue a signal so that it will be sent to the virtual CPU as soon
444 as possible */
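/* Return value (as implemented below, not a formal contract):
 *   1        the signal was queued for the virtual CPU
 *   0        the signal was ignored or handled here (e.g. stop signals)
 *   -EAGAIN  no free sigqueue entry was available
 */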
445 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
446 {
447 CPUState *cpu = ENV_GET_CPU(env);
448 TaskState *ts = cpu->opaque;
449 struct emulated_sigtable *k;
450 struct sigqueue *q, **pq;
451 abi_ulong handler;
452 int queue;
453
454 #if defined(DEBUG_SIGNAL)
455 fprintf(stderr, "queue_signal: sig=%d\n",
456 sig);
457 #endif
458 k = &ts->sigtab[sig - 1];
459 queue = gdb_queuesig ();
460 handler = sigact_table[sig - 1]._sa_handler;
461 if (!queue && handler == TARGET_SIG_DFL) {
462 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
463 kill(getpid(),SIGSTOP);
464 return 0;
465 } else
466 /* default handler: ignore some signals. The others are fatal */
467 if (sig != TARGET_SIGCHLD &&
468 sig != TARGET_SIGURG &&
469 sig != TARGET_SIGWINCH &&
470 sig != TARGET_SIGCONT) {
471 force_sig(sig);
472 } else {
473 return 0; /* indicate ignored */
474 }
475 } else if (!queue && handler == TARGET_SIG_IGN) {
476 /* ignore signal */
477 return 0;
478 } else if (!queue && handler == TARGET_SIG_ERR) {
479 force_sig(sig);
480 } else {
481 pq = &k->first;
482 if (sig < TARGET_SIGRTMIN) {
483 /* for a non-real-time signal, we queue exactly one signal */
484 if (!k->pending)
485 q = &k->info;
486 else
487 return 0;
488 } else {
489 if (!k->pending) {
490 /* first signal */
491 q = &k->info;
492 } else {
493 q = alloc_sigqueue(env);
494 if (!q)
495 return -EAGAIN;
496 while (*pq != NULL)
497 pq = &(*pq)->next;
498 }
499 }
500 *pq = q;
501 q->info = *info;
502 q->next = NULL;
503 k->pending = 1;
504 /* signal that a new signal is pending */
505 ts->signal_pending = 1;
506 return 1; /* indicates that the signal was queued */
507 }
508 }
509
510 static void host_signal_handler(int host_signum, siginfo_t *info,
511 void *puc)
512 {
513 CPUArchState *env = thread_cpu->env_ptr;
514 int sig;
515 target_siginfo_t tinfo;
516
517 /* the CPU emulator uses some host signals to detect exceptions;
518 we forward those to it (si_code > 0 excludes user-sent signals) */
519 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
520 && info->si_code > 0) {
521 if (cpu_signal_handler(host_signum, info, puc))
522 return;
523 }
524
525 /* get target signal number */
526 sig = host_to_target_signal(host_signum);
527 if (sig < 1 || sig > TARGET_NSIG)
528 return;
529 #if defined(DEBUG_SIGNAL)
530 fprintf(stderr, "qemu: got signal %d\n", sig);
531 #endif
532 host_to_target_siginfo_noswap(&tinfo, info);
533 if (queue_signal(env, sig, &tinfo) == 1) {
534 /* interrupt the virtual CPU as soon as possible */
535 cpu_exit(thread_cpu);
536 }
537 }
538
539 /* do_sigaltstack() returns target values and errnos. */
540 /* compare linux/kernel/signal.c:do_sigaltstack() */
541 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
542 {
543 int ret;
544 struct target_sigaltstack oss;
545
546 /* XXX: test errors */
547 if(uoss_addr)
548 {
549 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
550 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
551 __put_user(sas_ss_flags(sp), &oss.ss_flags);
552 }
553
554 if(uss_addr)
555 {
556 struct target_sigaltstack *uss;
557 struct target_sigaltstack ss;
558
559 ret = -TARGET_EFAULT;
560 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)
561 || __get_user(ss.ss_sp, &uss->ss_sp)
562 || __get_user(ss.ss_size, &uss->ss_size)
563 || __get_user(ss.ss_flags, &uss->ss_flags))
564 goto out;
565 unlock_user_struct(uss, uss_addr, 0);
566
567 ret = -TARGET_EPERM;
568 if (on_sig_stack(sp))
569 goto out;
570
571 ret = -TARGET_EINVAL;
572 if (ss.ss_flags != TARGET_SS_DISABLE
573 && ss.ss_flags != TARGET_SS_ONSTACK
574 && ss.ss_flags != 0)
575 goto out;
576
577 if (ss.ss_flags == TARGET_SS_DISABLE) {
578 ss.ss_size = 0;
579 ss.ss_sp = 0;
580 } else {
581 ret = -TARGET_ENOMEM;
582 if (ss.ss_size < MINSIGSTKSZ)
583 goto out;
584 }
585
586 target_sigaltstack_used.ss_sp = ss.ss_sp;
587 target_sigaltstack_used.ss_size = ss.ss_size;
588 }
589
590 if (uoss_addr) {
591 ret = -TARGET_EFAULT;
592 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
593 goto out;
594 }
595
596 ret = 0;
597 out:
598 return ret;
599 }
600
601 /* do_sigaction() returns host values and errnos */
602 int do_sigaction(int sig, const struct target_sigaction *act,
603 struct target_sigaction *oact)
604 {
605 struct target_sigaction *k;
606 struct sigaction act1;
607 int host_sig;
608 int ret = 0;
609
610 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
611 return -EINVAL;
612 k = &sigact_table[sig - 1];
613 #if defined(DEBUG_SIGNAL)
614 fprintf(stderr, "sigaction sig=%d act=0x%p, oact=0x%p\n",
615 sig, act, oact);
616 #endif
617 if (oact) {
618 __put_user(k->_sa_handler, &oact->_sa_handler);
619 __put_user(k->sa_flags, &oact->sa_flags);
620 #if !defined(TARGET_MIPS)
621 __put_user(k->sa_restorer, &oact->sa_restorer);
622 #endif
623 /* Not swapped. */
624 oact->sa_mask = k->sa_mask;
625 }
626 if (act) {
627 /* FIXME: This is not threadsafe. */
628 __get_user(k->_sa_handler, &act->_sa_handler);
629 __get_user(k->sa_flags, &act->sa_flags);
630 #if !defined(TARGET_MIPS)
631 __get_user(k->sa_restorer, &act->sa_restorer);
632 #endif
633 /* To be swapped in target_to_host_sigset. */
634 k->sa_mask = act->sa_mask;
635
636 /* we update the host linux signal state */
637 host_sig = target_to_host_signal(sig);
638 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
639 sigfillset(&act1.sa_mask);
640 act1.sa_flags = SA_SIGINFO;
641 if (k->sa_flags & TARGET_SA_RESTART)
642 act1.sa_flags |= SA_RESTART;
643 /* NOTE: it is important to update the host kernel signal
644 ignore state to avoid getting unexpectedly interrupted
645 syscalls */
646 if (k->_sa_handler == TARGET_SIG_IGN) {
647 act1.sa_sigaction = (void *)SIG_IGN;
648 } else if (k->_sa_handler == TARGET_SIG_DFL) {
649 if (fatal_signal (sig))
650 act1.sa_sigaction = host_signal_handler;
651 else
652 act1.sa_sigaction = (void *)SIG_DFL;
653 } else {
654 act1.sa_sigaction = host_signal_handler;
655 }
656 ret = sigaction(host_sig, &act1, NULL);
657 }
658 }
659 return ret;
660 }
661
662 static inline int copy_siginfo_to_user(target_siginfo_t *tinfo,
663 const target_siginfo_t *info)
664 {
665 tswap_siginfo(tinfo, info);
666 return 0;
667 }
668
669 static inline int current_exec_domain_sig(int sig)
670 {
671 return /* current->exec_domain && current->exec_domain->signal_invmap
672 && sig < 32 ? current->exec_domain->signal_invmap[sig] : */ sig;
673 }
674
675 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
676
677 /* from the Linux kernel */
678
679 struct target_fpreg {
680 uint16_t significand[4];
681 uint16_t exponent;
682 };
683
684 struct target_fpxreg {
685 uint16_t significand[4];
686 uint16_t exponent;
687 uint16_t padding[3];
688 };
689
690 struct target_xmmreg {
691 abi_ulong element[4];
692 };
693
694 struct target_fpstate {
695 /* Regular FPU environment */
696 abi_ulong cw;
697 abi_ulong sw;
698 abi_ulong tag;
699 abi_ulong ipoff;
700 abi_ulong cssel;
701 abi_ulong dataoff;
702 abi_ulong datasel;
703 struct target_fpreg _st[8];
704 uint16_t status;
705 uint16_t magic; /* 0xffff = regular FPU data only */
706
707 /* FXSR FPU environment */
708 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
709 abi_ulong mxcsr;
710 abi_ulong reserved;
711 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
712 struct target_xmmreg _xmm[8];
713 abi_ulong padding[56];
714 };
715
716 #define X86_FXSR_MAGIC 0x0000
717
718 struct target_sigcontext {
719 uint16_t gs, __gsh;
720 uint16_t fs, __fsh;
721 uint16_t es, __esh;
722 uint16_t ds, __dsh;
723 abi_ulong edi;
724 abi_ulong esi;
725 abi_ulong ebp;
726 abi_ulong esp;
727 abi_ulong ebx;
728 abi_ulong edx;
729 abi_ulong ecx;
730 abi_ulong eax;
731 abi_ulong trapno;
732 abi_ulong err;
733 abi_ulong eip;
734 uint16_t cs, __csh;
735 abi_ulong eflags;
736 abi_ulong esp_at_signal;
737 uint16_t ss, __ssh;
738 abi_ulong fpstate; /* pointer */
739 abi_ulong oldmask;
740 abi_ulong cr2;
741 };
742
743 struct target_ucontext {
744 abi_ulong tuc_flags;
745 abi_ulong tuc_link;
746 target_stack_t tuc_stack;
747 struct target_sigcontext tuc_mcontext;
748 target_sigset_t tuc_sigmask; /* mask last for extensibility */
749 };
750
751 struct sigframe
752 {
753 abi_ulong pretcode;
754 int sig;
755 struct target_sigcontext sc;
756 struct target_fpstate fpstate;
757 abi_ulong extramask[TARGET_NSIG_WORDS-1];
758 char retcode[8];
759 };
760
761 struct rt_sigframe
762 {
763 abi_ulong pretcode;
764 int sig;
765 abi_ulong pinfo;
766 abi_ulong puc;
767 struct target_siginfo info;
768 struct target_ucontext uc;
769 struct target_fpstate fpstate;
770 char retcode[8];
771 };
772
773 /*
774 * Set up a signal frame.
775 */
776
777 /* XXX: save x87 state */
778 static int
779 setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
780 CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr)
781 {
782 CPUState *cs = CPU(x86_env_get_cpu(env));
783 int err = 0;
784 uint16_t magic;
785
786 /* already locked in setup_frame() */
787 err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
788 err |= __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
789 err |= __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
790 err |= __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
791 err |= __put_user(env->regs[R_EDI], &sc->edi);
792 err |= __put_user(env->regs[R_ESI], &sc->esi);
793 err |= __put_user(env->regs[R_EBP], &sc->ebp);
794 err |= __put_user(env->regs[R_ESP], &sc->esp);
795 err |= __put_user(env->regs[R_EBX], &sc->ebx);
796 err |= __put_user(env->regs[R_EDX], &sc->edx);
797 err |= __put_user(env->regs[R_ECX], &sc->ecx);
798 err |= __put_user(env->regs[R_EAX], &sc->eax);
799 err |= __put_user(cs->exception_index, &sc->trapno);
800 err |= __put_user(env->error_code, &sc->err);
801 err |= __put_user(env->eip, &sc->eip);
802 err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
803 err |= __put_user(env->eflags, &sc->eflags);
804 err |= __put_user(env->regs[R_ESP], &sc->esp_at_signal);
805 err |= __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
806
807 cpu_x86_fsave(env, fpstate_addr, 1);
808 fpstate->status = fpstate->sw;
809 magic = 0xffff;
810 err |= __put_user(magic, &fpstate->magic);
811 err |= __put_user(fpstate_addr, &sc->fpstate);
812
813 /* non-iBCS2 extensions.. */
814 err |= __put_user(mask, &sc->oldmask);
815 err |= __put_user(env->cr[2], &sc->cr2);
816 return err;
817 }
818
819 /*
820 * Determine which stack to use..
821 */
822
823 static inline abi_ulong
824 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
825 {
826 unsigned long esp;
827
828 /* Default to using normal stack */
829 esp = env->regs[R_ESP];
830 /* This is the X/Open sanctioned signal stack switching. */
831 if (ka->sa_flags & TARGET_SA_ONSTACK) {
832 if (sas_ss_flags(esp) == 0)
833 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
834 }
835
836 /* This is the legacy signal stack switching. */
837 else
838 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
839 !(ka->sa_flags & TARGET_SA_RESTORER) &&
840 ka->sa_restorer) {
841 esp = (unsigned long) ka->sa_restorer;
842 }
843 return (esp - frame_size) & -8ul;
844 }
845
846 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
847 static void setup_frame(int sig, struct target_sigaction *ka,
848 target_sigset_t *set, CPUX86State *env)
849 {
850 abi_ulong frame_addr;
851 struct sigframe *frame;
852 int i, err = 0;
853
854 frame_addr = get_sigframe(ka, env, sizeof(*frame));
855
856 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
857 goto give_sigsegv;
858
859 err |= __put_user(current_exec_domain_sig(sig),
860 &frame->sig);
861 if (err)
862 goto give_sigsegv;
863
864 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
865 frame_addr + offsetof(struct sigframe, fpstate));
866 if (err)
867 goto give_sigsegv;
868
869 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
870 if (__put_user(set->sig[i], &frame->extramask[i - 1]))
871 goto give_sigsegv;
872 }
873
874 /* Set up to return from userspace. If provided, use a stub
875 already in userspace. */
876 if (ka->sa_flags & TARGET_SA_RESTORER) {
877 err |= __put_user(ka->sa_restorer, &frame->pretcode);
878 } else {
879 uint16_t val16;
880 abi_ulong retcode_addr;
881 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
882 err |= __put_user(retcode_addr, &frame->pretcode);
883 /* This is popl %eax ; movl $,%eax ; int $0x80 */
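/* Byte layout of the retcode buffer (x86 stores the uint16_t 0xb858
 * little-endian, so 0x58 comes first):
 *   0x58                popl %eax
 *   0xb8 imm32          movl $TARGET_NR_sigreturn,%eax
 *   0xcd 0x80           int  $0x80
 */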
884 val16 = 0xb858;
885 err |= __put_user(val16, (uint16_t *)(frame->retcode+0));
886 err |= __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
887 val16 = 0x80cd;
888 err |= __put_user(val16, (uint16_t *)(frame->retcode+6));
889 }
890
891 if (err)
892 goto give_sigsegv;
893
894 /* Set up registers for signal handler */
895 env->regs[R_ESP] = frame_addr;
896 env->eip = ka->_sa_handler;
897
898 cpu_x86_load_seg(env, R_DS, __USER_DS);
899 cpu_x86_load_seg(env, R_ES, __USER_DS);
900 cpu_x86_load_seg(env, R_SS, __USER_DS);
901 cpu_x86_load_seg(env, R_CS, __USER_CS);
902 env->eflags &= ~TF_MASK;
903
904 unlock_user_struct(frame, frame_addr, 1);
905
906 return;
907
908 give_sigsegv:
909 unlock_user_struct(frame, frame_addr, 1);
910 if (sig == TARGET_SIGSEGV)
911 ka->_sa_handler = TARGET_SIG_DFL;
912 force_sig(TARGET_SIGSEGV /* , current */);
913 }
914
915 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
916 static void setup_rt_frame(int sig, struct target_sigaction *ka,
917 target_siginfo_t *info,
918 target_sigset_t *set, CPUX86State *env)
919 {
920 abi_ulong frame_addr, addr;
921 struct rt_sigframe *frame;
922 int i, err = 0;
923
924 frame_addr = get_sigframe(ka, env, sizeof(*frame));
925
926 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
927 goto give_sigsegv;
928
929 err |= __put_user(current_exec_domain_sig(sig),
930 &frame->sig);
931 addr = frame_addr + offsetof(struct rt_sigframe, info);
932 err |= __put_user(addr, &frame->pinfo);
933 addr = frame_addr + offsetof(struct rt_sigframe, uc);
934 err |= __put_user(addr, &frame->puc);
935 err |= copy_siginfo_to_user(&frame->info, info);
936 if (err)
937 goto give_sigsegv;
938
939 /* Create the ucontext. */
940 err |= __put_user(0, &frame->uc.tuc_flags);
941 err |= __put_user(0, &frame->uc.tuc_link);
942 err |= __put_user(target_sigaltstack_used.ss_sp,
943 &frame->uc.tuc_stack.ss_sp);
944 err |= __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
945 &frame->uc.tuc_stack.ss_flags);
946 err |= __put_user(target_sigaltstack_used.ss_size,
947 &frame->uc.tuc_stack.ss_size);
948 err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate,
949 env, set->sig[0],
950 frame_addr + offsetof(struct rt_sigframe, fpstate));
951 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
952 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
953 goto give_sigsegv;
954 }
955
956 /* Set up to return from userspace. If provided, use a stub
957 already in userspace. */
958 if (ka->sa_flags & TARGET_SA_RESTORER) {
959 err |= __put_user(ka->sa_restorer, &frame->pretcode);
960 } else {
961 uint16_t val16;
962 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
963 err |= __put_user(addr, &frame->pretcode);
964 /* This is movl $,%eax ; int $0x80 */
965 err |= __put_user(0xb8, (char *)(frame->retcode+0));
966 err |= __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
967 val16 = 0x80cd;
968 err |= __put_user(val16, (uint16_t *)(frame->retcode+5));
969 }
970
971 if (err)
972 goto give_sigsegv;
973
974 /* Set up registers for signal handler */
975 env->regs[R_ESP] = frame_addr;
976 env->eip = ka->_sa_handler;
977
978 cpu_x86_load_seg(env, R_DS, __USER_DS);
979 cpu_x86_load_seg(env, R_ES, __USER_DS);
980 cpu_x86_load_seg(env, R_SS, __USER_DS);
981 cpu_x86_load_seg(env, R_CS, __USER_CS);
982 env->eflags &= ~TF_MASK;
983
984 unlock_user_struct(frame, frame_addr, 1);
985
986 return;
987
988 give_sigsegv:
989 unlock_user_struct(frame, frame_addr, 1);
990 if (sig == TARGET_SIGSEGV)
991 ka->_sa_handler = TARGET_SIG_DFL;
992 force_sig(TARGET_SIGSEGV /* , current */);
993 }
994
995 static int
996 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
997 {
998 unsigned int err = 0;
999 abi_ulong fpstate_addr;
1000 unsigned int tmpflags;
1001
1002 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1003 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1004 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1005 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1006
1007 env->regs[R_EDI] = tswapl(sc->edi);
1008 env->regs[R_ESI] = tswapl(sc->esi);
1009 env->regs[R_EBP] = tswapl(sc->ebp);
1010 env->regs[R_ESP] = tswapl(sc->esp);
1011 env->regs[R_EBX] = tswapl(sc->ebx);
1012 env->regs[R_EDX] = tswapl(sc->edx);
1013 env->regs[R_ECX] = tswapl(sc->ecx);
1014 env->eip = tswapl(sc->eip);
1015
1016 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1017 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1018
1019 tmpflags = tswapl(sc->eflags);
1020 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
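/* 0x40DD5 is CF|PF|AF|ZF|SF|TF|DF|OF|AC
 * (0x1|0x4|0x10|0x40|0x80|0x100|0x400|0x800|0x40000); only these
 * user-controllable flags are taken from the saved context, the
 * remaining eflags bits are preserved. */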
1021 // regs->orig_eax = -1; /* disable syscall checks */
1022
1023 fpstate_addr = tswapl(sc->fpstate);
1024 if (fpstate_addr != 0) {
1025 if (!access_ok(VERIFY_READ, fpstate_addr,
1026 sizeof(struct target_fpstate)))
1027 goto badframe;
1028 cpu_x86_frstor(env, fpstate_addr, 1);
1029 }
1030
1031 *peax = tswapl(sc->eax);
1032 return err;
1033 badframe:
1034 return 1;
1035 }
1036
1037 long do_sigreturn(CPUX86State *env)
1038 {
1039 struct sigframe *frame;
1040 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1041 target_sigset_t target_set;
1042 sigset_t set;
1043 int eax, i;
1044
1045 #if defined(DEBUG_SIGNAL)
1046 fprintf(stderr, "do_sigreturn\n");
1047 #endif
1048 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1049 goto badframe;
1050 /* set blocked signals */
1051 if (__get_user(target_set.sig[0], &frame->sc.oldmask))
1052 goto badframe;
1053 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1054 if (__get_user(target_set.sig[i], &frame->extramask[i - 1]))
1055 goto badframe;
1056 }
1057
1058 target_to_host_sigset_internal(&set, &target_set);
1059 sigprocmask(SIG_SETMASK, &set, NULL);
1060
1061 /* restore registers */
1062 if (restore_sigcontext(env, &frame->sc, &eax))
1063 goto badframe;
1064 unlock_user_struct(frame, frame_addr, 0);
1065 return eax;
1066
1067 badframe:
1068 unlock_user_struct(frame, frame_addr, 0);
1069 force_sig(TARGET_SIGSEGV);
1070 return 0;
1071 }
1072
1073 long do_rt_sigreturn(CPUX86State *env)
1074 {
1075 abi_ulong frame_addr;
1076 struct rt_sigframe *frame;
1077 sigset_t set;
1078 int eax;
1079
1080 frame_addr = env->regs[R_ESP] - 4;
1081 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1082 goto badframe;
1083 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1084 sigprocmask(SIG_SETMASK, &set, NULL);
1085
1086 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
1087 goto badframe;
1088
1089 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1090 get_sp_from_cpustate(env)) == -EFAULT)
1091 goto badframe;
1092
1093 unlock_user_struct(frame, frame_addr, 0);
1094 return eax;
1095
1096 badframe:
1097 unlock_user_struct(frame, frame_addr, 0);
1098 force_sig(TARGET_SIGSEGV);
1099 return 0;
1100 }
1101
1102 #elif defined(TARGET_AARCH64)
1103
1104 struct target_sigcontext {
1105 uint64_t fault_address;
1106 /* AArch64 registers */
1107 uint64_t regs[31];
1108 uint64_t sp;
1109 uint64_t pc;
1110 uint64_t pstate;
1111 /* 4K reserved for FP/SIMD state and future expansion */
1112 char __reserved[4096] __attribute__((__aligned__(16)));
1113 };
1114
1115 struct target_ucontext {
1116 abi_ulong tuc_flags;
1117 abi_ulong tuc_link;
1118 target_stack_t tuc_stack;
1119 target_sigset_t tuc_sigmask;
1120 /* glibc uses a 1024-bit sigset_t */
1121 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1122 /* last for future expansion */
1123 struct target_sigcontext tuc_mcontext;
1124 };
1125
1126 /*
1127 * Header to be used at the beginning of structures extending the user
1128 * context. Such structures must be placed after the rt_sigframe on the stack
1129 * and be 16-byte aligned. The last structure must be a dummy one with the
1130 * magic and size set to 0.
1131 */
1132 struct target_aarch64_ctx {
1133 uint32_t magic;
1134 uint32_t size;
1135 };
1136
1137 #define TARGET_FPSIMD_MAGIC 0x46508001
1138
1139 struct target_fpsimd_context {
1140 struct target_aarch64_ctx head;
1141 uint32_t fpsr;
1142 uint32_t fpcr;
1143 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1144 };
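/* Each Q register is stored as two 64-bit halves; target_setup_sigframe()
 * below swaps the halves on a big-endian guest so that the pair reads back
 * as one 128-bit value in guest byte order. */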
1145
1146 /*
1147 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1148 * user space as it will change with the addition of new context. User space
1149 * should check the magic/size information.
1150 */
1151 struct target_aux_context {
1152 struct target_fpsimd_context fpsimd;
1153 /* additional context to be added before "end" */
1154 struct target_aarch64_ctx end;
1155 };
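/* Sketch of the resulting __reserved layout as written by
 * target_setup_sigframe() (offsets depend on the struct sizes above):
 *   struct target_fpsimd_context   magic = TARGET_FPSIMD_MAGIC, size, fpsr, fpcr, vregs[]
 *   struct target_aarch64_ctx      magic = 0, size = 0   (terminator)
 */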
1156
1157 struct target_rt_sigframe {
1158 struct target_siginfo info;
1159 struct target_ucontext uc;
1160 uint64_t fp;
1161 uint64_t lr;
1162 uint32_t tramp[2];
1163 };
1164
1165 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1166 CPUARMState *env, target_sigset_t *set)
1167 {
1168 int i;
1169 struct target_aux_context *aux =
1170 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1171
1172 /* set up the stack frame for unwinding */
1173 __put_user(env->xregs[29], &sf->fp);
1174 __put_user(env->xregs[30], &sf->lr);
1175
1176 for (i = 0; i < 31; i++) {
1177 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1178 }
1179 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1180 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1181 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1182
1183 __put_user(/*current->thread.fault_address*/ 0,
1184 &sf->uc.tuc_mcontext.fault_address);
1185
1186 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1187 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1188 }
1189
1190 for (i = 0; i < 32; i++) {
1191 #ifdef TARGET_WORDS_BIGENDIAN
1192 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1193 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1194 #else
1195 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1196 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1197 #endif
1198 }
1199 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1200 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1201 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1202 __put_user(sizeof(struct target_fpsimd_context),
1203 &aux->fpsimd.head.size);
1204
1205 /* set the "end" magic */
1206 __put_user(0, &aux->end.magic);
1207 __put_user(0, &aux->end.size);
1208
1209 return 0;
1210 }
1211
1212 static int target_restore_sigframe(CPUARMState *env,
1213 struct target_rt_sigframe *sf)
1214 {
1215 sigset_t set;
1216 int i;
1217 struct target_aux_context *aux =
1218 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1219 uint32_t magic, size, fpsr, fpcr;
1220 uint64_t pstate;
1221
1222 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1223 sigprocmask(SIG_SETMASK, &set, NULL);
1224
1225 for (i = 0; i < 31; i++) {
1226 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1227 }
1228
1229 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1230 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1231 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1232 pstate_write(env, pstate);
1233
1234 __get_user(magic, &aux->fpsimd.head.magic);
1235 __get_user(size, &aux->fpsimd.head.size);
1236
1237 if (magic != TARGET_FPSIMD_MAGIC
1238 || size != sizeof(struct target_fpsimd_context)) {
1239 return 1;
1240 }
1241
1242 for (i = 0; i < 32; i++) {
1243 #ifdef TARGET_WORDS_BIGENDIAN
1244 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1245 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1246 #else
1247 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1248 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1249 #endif
1250 }
1251 __get_user(fpsr, &aux->fpsimd.fpsr);
1252 vfp_set_fpsr(env, fpsr);
1253 __get_user(fpcr, &aux->fpsimd.fpcr);
1254 vfp_set_fpcr(env, fpcr);
1255
1256 return 0;
1257 }
1258
1259 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1260 {
1261 abi_ulong sp;
1262
1263 sp = env->xregs[31];
1264
1265 /*
1266 * This is the X/Open sanctioned signal stack switching.
1267 */
1268 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
1269 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1270 }
1271
1272 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1273
1274 return sp;
1275 }
1276
1277 static void target_setup_frame(int usig, struct target_sigaction *ka,
1278 target_siginfo_t *info, target_sigset_t *set,
1279 CPUARMState *env)
1280 {
1281 struct target_rt_sigframe *frame;
1282 abi_ulong frame_addr, return_addr;
1283
1284 frame_addr = get_sigframe(ka, env);
1285 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1286 goto give_sigsegv;
1287 }
1288
1289 __put_user(0, &frame->uc.tuc_flags);
1290 __put_user(0, &frame->uc.tuc_link);
1291
1292 __put_user(target_sigaltstack_used.ss_sp,
1293 &frame->uc.tuc_stack.ss_sp);
1294 __put_user(sas_ss_flags(env->xregs[31]),
1295 &frame->uc.tuc_stack.ss_flags);
1296 __put_user(target_sigaltstack_used.ss_size,
1297 &frame->uc.tuc_stack.ss_size);
1298 target_setup_sigframe(frame, env, set);
1299 if (ka->sa_flags & TARGET_SA_RESTORER) {
1300 return_addr = ka->sa_restorer;
1301 } else {
1302 /* mov x8,#__NR_rt_sigreturn; svc #0 */
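/* Encoding breakdown of the trampoline written below:
 *   0xd2801168   movz x8, #0x8b   (0x8b == 139 == rt_sigreturn on AArch64)
 *   0xd4000001   svc  #0
 */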
1303 __put_user(0xd2801168, &frame->tramp[0]);
1304 __put_user(0xd4000001, &frame->tramp[1]);
1305 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1306 }
1307 env->xregs[0] = usig;
1308 env->xregs[31] = frame_addr;
1309 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1310 env->pc = ka->_sa_handler;
1311 env->xregs[30] = return_addr;
1312 if (info) {
1313 if (copy_siginfo_to_user(&frame->info, info)) {
1314 goto give_sigsegv;
1315 }
1316 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1317 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1318 }
1319
1320 unlock_user_struct(frame, frame_addr, 1);
1321 return;
1322
1323 give_sigsegv:
1324 unlock_user_struct(frame, frame_addr, 1);
1325 force_sig(TARGET_SIGSEGV);
1326 }
1327
1328 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1329 target_siginfo_t *info, target_sigset_t *set,
1330 CPUARMState *env)
1331 {
1332 target_setup_frame(sig, ka, info, set, env);
1333 }
1334
1335 static void setup_frame(int sig, struct target_sigaction *ka,
1336 target_sigset_t *set, CPUARMState *env)
1337 {
1338 target_setup_frame(sig, ka, 0, set, env);
1339 }
1340
1341 long do_rt_sigreturn(CPUARMState *env)
1342 {
1343 struct target_rt_sigframe *frame = NULL;
1344 abi_ulong frame_addr = env->xregs[31];
1345
1346 if (frame_addr & 15) {
1347 goto badframe;
1348 }
1349
1350 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1351 goto badframe;
1352 }
1353
1354 if (target_restore_sigframe(env, frame)) {
1355 goto badframe;
1356 }
1357
1358 if (do_sigaltstack(frame_addr +
1359 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1360 0, get_sp_from_cpustate(env)) == -EFAULT) {
1361 goto badframe;
1362 }
1363
1364 unlock_user_struct(frame, frame_addr, 0);
1365 return env->xregs[0];
1366
1367 badframe:
1368 unlock_user_struct(frame, frame_addr, 0);
1369 force_sig(TARGET_SIGSEGV);
1370 return 0;
1371 }
1372
1373 long do_sigreturn(CPUARMState *env)
1374 {
1375 return do_rt_sigreturn(env);
1376 }
1377
1378 #elif defined(TARGET_ARM)
1379
1380 struct target_sigcontext {
1381 abi_ulong trap_no;
1382 abi_ulong error_code;
1383 abi_ulong oldmask;
1384 abi_ulong arm_r0;
1385 abi_ulong arm_r1;
1386 abi_ulong arm_r2;
1387 abi_ulong arm_r3;
1388 abi_ulong arm_r4;
1389 abi_ulong arm_r5;
1390 abi_ulong arm_r6;
1391 abi_ulong arm_r7;
1392 abi_ulong arm_r8;
1393 abi_ulong arm_r9;
1394 abi_ulong arm_r10;
1395 abi_ulong arm_fp;
1396 abi_ulong arm_ip;
1397 abi_ulong arm_sp;
1398 abi_ulong arm_lr;
1399 abi_ulong arm_pc;
1400 abi_ulong arm_cpsr;
1401 abi_ulong fault_address;
1402 };
1403
1404 struct target_ucontext_v1 {
1405 abi_ulong tuc_flags;
1406 abi_ulong tuc_link;
1407 target_stack_t tuc_stack;
1408 struct target_sigcontext tuc_mcontext;
1409 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1410 };
1411
1412 struct target_ucontext_v2 {
1413 abi_ulong tuc_flags;
1414 abi_ulong tuc_link;
1415 target_stack_t tuc_stack;
1416 struct target_sigcontext tuc_mcontext;
1417 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1418 char __unused[128 - sizeof(target_sigset_t)];
1419 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1420 };
1421
1422 struct target_user_vfp {
1423 uint64_t fpregs[32];
1424 abi_ulong fpscr;
1425 };
1426
1427 struct target_user_vfp_exc {
1428 abi_ulong fpexc;
1429 abi_ulong fpinst;
1430 abi_ulong fpinst2;
1431 };
1432
1433 struct target_vfp_sigframe {
1434 abi_ulong magic;
1435 abi_ulong size;
1436 struct target_user_vfp ufp;
1437 struct target_user_vfp_exc ufp_exc;
1438 } __attribute__((__aligned__(8)));
1439
1440 struct target_iwmmxt_sigframe {
1441 abi_ulong magic;
1442 abi_ulong size;
1443 uint64_t regs[16];
1444 /* Note that not all the coprocessor control registers are stored here */
1445 uint32_t wcssf;
1446 uint32_t wcasf;
1447 uint32_t wcgr0;
1448 uint32_t wcgr1;
1449 uint32_t wcgr2;
1450 uint32_t wcgr3;
1451 } __attribute__((__aligned__(8)));
1452
1453 #define TARGET_VFP_MAGIC 0x56465001
1454 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1455
1456 struct sigframe_v1
1457 {
1458 struct target_sigcontext sc;
1459 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1460 abi_ulong retcode;
1461 };
1462
1463 struct sigframe_v2
1464 {
1465 struct target_ucontext_v2 uc;
1466 abi_ulong retcode;
1467 };
1468
1469 struct rt_sigframe_v1
1470 {
1471 abi_ulong pinfo;
1472 abi_ulong puc;
1473 struct target_siginfo info;
1474 struct target_ucontext_v1 uc;
1475 abi_ulong retcode;
1476 };
1477
1478 struct rt_sigframe_v2
1479 {
1480 struct target_siginfo info;
1481 struct target_ucontext_v2 uc;
1482 abi_ulong retcode;
1483 };
1484
1485 #define TARGET_CONFIG_CPU_32 1
1486
1487 /*
1488 * For ARM syscalls, we encode the syscall number into the instruction.
1489 */
1490 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1491 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1492
1493 /*
1494 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1495 * need two 16-bit instructions.
1496 */
1497 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1498 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1499
1500 static const abi_ulong retcodes[4] = {
1501 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1502 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1503 };
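/* retcodes[] is indexed by 'thumb' in setup_return(), plus 2 when
 * SA_SIGINFO is set: even entries are ARM "swi" trampolines with the
 * syscall number encoded in the instruction, odd entries pack the Thumb
 * pair "movs r7, #NR; svc 0" into one word (the movs halfword sits in
 * the low 16 bits). */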
1504
1505
1506 #define __get_user_error(x,p,e) __get_user(x, p)
1507
1508 static inline int valid_user_regs(CPUARMState *regs)
1509 {
1510 return 1;
1511 }
1512
1513 static void
1514 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1515 CPUARMState *env, abi_ulong mask)
1516 {
1517 __put_user(env->regs[0], &sc->arm_r0);
1518 __put_user(env->regs[1], &sc->arm_r1);
1519 __put_user(env->regs[2], &sc->arm_r2);
1520 __put_user(env->regs[3], &sc->arm_r3);
1521 __put_user(env->regs[4], &sc->arm_r4);
1522 __put_user(env->regs[5], &sc->arm_r5);
1523 __put_user(env->regs[6], &sc->arm_r6);
1524 __put_user(env->regs[7], &sc->arm_r7);
1525 __put_user(env->regs[8], &sc->arm_r8);
1526 __put_user(env->regs[9], &sc->arm_r9);
1527 __put_user(env->regs[10], &sc->arm_r10);
1528 __put_user(env->regs[11], &sc->arm_fp);
1529 __put_user(env->regs[12], &sc->arm_ip);
1530 __put_user(env->regs[13], &sc->arm_sp);
1531 __put_user(env->regs[14], &sc->arm_lr);
1532 __put_user(env->regs[15], &sc->arm_pc);
1533 #ifdef TARGET_CONFIG_CPU_32
1534 __put_user(cpsr_read(env), &sc->arm_cpsr);
1535 #endif
1536
1537 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1538 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1539 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1540 __put_user(mask, &sc->oldmask);
1541 }
1542
1543 static inline abi_ulong
1544 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1545 {
1546 unsigned long sp = regs->regs[13];
1547
1548 /*
1549 * This is the X/Open sanctioned signal stack switching.
1550 */
1551 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp))
1552 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1553 /*
1554 * ATPCS B01 mandates 8-byte alignment
1555 */
1556 return (sp - framesize) & ~7;
1557 }
1558
1559 static int
1560 setup_return(CPUARMState *env, struct target_sigaction *ka,
1561 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1562 {
1563 abi_ulong handler = ka->_sa_handler;
1564 abi_ulong retcode;
1565 int thumb = handler & 1;
1566 uint32_t cpsr = cpsr_read(env);
1567
1568 cpsr &= ~CPSR_IT;
1569 if (thumb) {
1570 cpsr |= CPSR_T;
1571 } else {
1572 cpsr &= ~CPSR_T;
1573 }
1574
1575 if (ka->sa_flags & TARGET_SA_RESTORER) {
1576 retcode = ka->sa_restorer;
1577 } else {
1578 unsigned int idx = thumb;
1579
1580 if (ka->sa_flags & TARGET_SA_SIGINFO)
1581 idx += 2;
1582
1583 if (__put_user(retcodes[idx], rc))
1584 return 1;
1585
1586 retcode = rc_addr + thumb;
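/* The '+ thumb' above sets bit 0 of the return address, so the CPU
 * switches to Thumb state when the handler returns into the trampoline. */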
1587 }
1588
1589 env->regs[0] = usig;
1590 env->regs[13] = frame_addr;
1591 env->regs[14] = retcode;
1592 env->regs[15] = handler & (thumb ? ~1 : ~3);
1593 cpsr_write(env, cpsr, 0xffffffff);
1594
1595 return 0;
1596 }
1597
1598 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1599 {
1600 int i;
1601 struct target_vfp_sigframe *vfpframe;
1602 vfpframe = (struct target_vfp_sigframe *)regspace;
1603 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1604 __put_user(sizeof(*vfpframe), &vfpframe->size);
1605 for (i = 0; i < 32; i++) {
1606 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1607 }
1608 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1609 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1610 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1611 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1612 return (abi_ulong*)(vfpframe+1);
1613 }
1614
1615 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1616 CPUARMState *env)
1617 {
1618 int i;
1619 struct target_iwmmxt_sigframe *iwmmxtframe;
1620 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1621 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1622 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1623 for (i = 0; i < 16; i++) {
1624 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1625 }
1626 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1627 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1628 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1629 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1630 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1631 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1632 return (abi_ulong*)(iwmmxtframe+1);
1633 }
1634
1635 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1636 target_sigset_t *set, CPUARMState *env)
1637 {
1638 struct target_sigaltstack stack;
1639 int i;
1640 abi_ulong *regspace;
1641
1642 /* Clear all the bits of the ucontext we don't use. */
1643 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1644
1645 memset(&stack, 0, sizeof(stack));
1646 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1647 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1648 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1649 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1650
1651 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1652 /* Save coprocessor signal frame. */
1653 regspace = uc->tuc_regspace;
1654 if (arm_feature(env, ARM_FEATURE_VFP)) {
1655 regspace = setup_sigframe_v2_vfp(regspace, env);
1656 }
1657 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1658 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1659 }
1660
1661 /* Write terminating magic word */
1662 __put_user(0, regspace);
1663
1664 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1665 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1666 }
1667 }
1668
1669 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1670 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1671 target_sigset_t *set, CPUARMState *regs)
1672 {
1673 struct sigframe_v1 *frame;
1674 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1675 int i;
1676
1677 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1678 return;
1679
1680 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1681
1682 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1683 if (__put_user(set->sig[i], &frame->extramask[i - 1]))
1684 goto end;
1685 }
1686
1687 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1688 frame_addr + offsetof(struct sigframe_v1, retcode));
1689
1690 end:
1691 unlock_user_struct(frame, frame_addr, 1);
1692 }
1693
1694 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1695 target_sigset_t *set, CPUARMState *regs)
1696 {
1697 struct sigframe_v2 *frame;
1698 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1699
1700 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1701 return;
1702
1703 setup_sigframe_v2(&frame->uc, set, regs);
1704
1705 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1706 frame_addr + offsetof(struct sigframe_v2, retcode));
1707
1708 unlock_user_struct(frame, frame_addr, 1);
1709 }
1710
1711 static void setup_frame(int usig, struct target_sigaction *ka,
1712 target_sigset_t *set, CPUARMState *regs)
1713 {
1714 if (get_osversion() >= 0x020612) {
1715 setup_frame_v2(usig, ka, set, regs);
1716 } else {
1717 setup_frame_v1(usig, ka, set, regs);
1718 }
1719 }
1720
1721 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1722 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1723 target_siginfo_t *info,
1724 target_sigset_t *set, CPUARMState *env)
1725 {
1726 struct rt_sigframe_v1 *frame;
1727 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1728 struct target_sigaltstack stack;
1729 int i;
1730 abi_ulong info_addr, uc_addr;
1731
1732 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1733 return /* 1 */;
1734
1735 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1736 __put_user(info_addr, &frame->pinfo);
1737 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1738 __put_user(uc_addr, &frame->puc);
1739 copy_siginfo_to_user(&frame->info, info);
1740
1741 /* Clear all the bits of the ucontext we don't use. */
1742 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1743
1744 memset(&stack, 0, sizeof(stack));
1745 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1746 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1747 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1748 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1749
1750 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1751 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1752 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
1753 goto end;
1754 }
1755
1756 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1757 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1758
1759 env->regs[1] = info_addr;
1760 env->regs[2] = uc_addr;
1761
1762 end:
1763 unlock_user_struct(frame, frame_addr, 1);
1764 }
1765
1766 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1767 target_siginfo_t *info,
1768 target_sigset_t *set, CPUARMState *env)
1769 {
1770 struct rt_sigframe_v2 *frame;
1771 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1772 abi_ulong info_addr, uc_addr;
1773
1774 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1775 return /* 1 */;
1776
1777 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1778 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1779 copy_siginfo_to_user(&frame->info, info);
1780
1781 setup_sigframe_v2(&frame->uc, set, env);
1782
1783 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1784 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1785
1786 env->regs[1] = info_addr;
1787 env->regs[2] = uc_addr;
1788
1789 unlock_user_struct(frame, frame_addr, 1);
1790 }
1791
1792 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1793 target_siginfo_t *info,
1794 target_sigset_t *set, CPUARMState *env)
1795 {
1796 if (get_osversion() >= 0x020612) {
1797 setup_rt_frame_v2(usig, ka, info, set, env);
1798 } else {
1799 setup_rt_frame_v1(usig, ka, info, set, env);
1800 }
1801 }
1802
1803 static int
1804 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1805 {
1806 int err = 0;
1807 uint32_t cpsr;
1808
1809 __get_user_error(env->regs[0], &sc->arm_r0, err);
1810 __get_user_error(env->regs[1], &sc->arm_r1, err);
1811 __get_user_error(env->regs[2], &sc->arm_r2, err);
1812 __get_user_error(env->regs[3], &sc->arm_r3, err);
1813 __get_user_error(env->regs[4], &sc->arm_r4, err);
1814 __get_user_error(env->regs[5], &sc->arm_r5, err);
1815 __get_user_error(env->regs[6], &sc->arm_r6, err);
1816 __get_user_error(env->regs[7], &sc->arm_r7, err);
1817 __get_user_error(env->regs[8], &sc->arm_r8, err);
1818 __get_user_error(env->regs[9], &sc->arm_r9, err);
1819 __get_user_error(env->regs[10], &sc->arm_r10, err);
1820 __get_user_error(env->regs[11], &sc->arm_fp, err);
1821 __get_user_error(env->regs[12], &sc->arm_ip, err);
1822 __get_user_error(env->regs[13], &sc->arm_sp, err);
1823 __get_user_error(env->regs[14], &sc->arm_lr, err);
1824 __get_user_error(env->regs[15], &sc->arm_pc, err);
1825 #ifdef TARGET_CONFIG_CPU_32
1826 __get_user_error(cpsr, &sc->arm_cpsr, err);
1827 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC);
1828 #endif
1829
1830 err |= !valid_user_regs(env);
1831
1832 return err;
1833 }
1834
1835 static long do_sigreturn_v1(CPUARMState *env)
1836 {
1837 abi_ulong frame_addr;
1838 struct sigframe_v1 *frame = NULL;
1839 target_sigset_t set;
1840 sigset_t host_set;
1841 int i;
1842
1843 /*
1844 * Since we stacked the signal on a 64-bit boundary,
1845 * 'sp' should be word aligned here. If it's not,
1846 * the user is trying to mess with us.
1847 */
1848 frame_addr = env->regs[13];
1849 if (frame_addr & 7) {
1850 goto badframe;
1851 }
1852
1853 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1854 goto badframe;
1855
1856 if (__get_user(set.sig[0], &frame->sc.oldmask))
1857 goto badframe;
1858 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1859 if (__get_user(set.sig[i], &frame->extramask[i - 1]))
1860 goto badframe;
1861 }
1862
1863 target_to_host_sigset_internal(&host_set, &set);
1864 sigprocmask(SIG_SETMASK, &host_set, NULL);
1865
1866 if (restore_sigcontext(env, &frame->sc))
1867 goto badframe;
1868
1869 #if 0
1870 /* Send SIGTRAP if we're single-stepping */
1871 if (ptrace_cancel_bpt(current))
1872 send_sig(SIGTRAP, current, 1);
1873 #endif
1874 unlock_user_struct(frame, frame_addr, 0);
1875 return env->regs[0];
1876
1877 badframe:
1878 unlock_user_struct(frame, frame_addr, 0);
1879 force_sig(TARGET_SIGSEGV /* , current */);
1880 return 0;
1881 }
1882
1883 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1884 {
1885 int i;
1886 abi_ulong magic, sz;
1887 uint32_t fpscr, fpexc;
1888 struct target_vfp_sigframe *vfpframe;
1889 vfpframe = (struct target_vfp_sigframe *)regspace;
1890
1891 __get_user(magic, &vfpframe->magic);
1892 __get_user(sz, &vfpframe->size);
1893 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1894 return 0;
1895 }
1896 for (i = 0; i < 32; i++) {
1897 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1898 }
1899 __get_user(fpscr, &vfpframe->ufp.fpscr);
1900 vfp_set_fpscr(env, fpscr);
1901 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1902 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1903 * and the exception flag is cleared
1904 */
1905 fpexc |= (1 << 30);
1906 fpexc &= ~((1 << 31) | (1 << 28));
1907 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1908 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1909 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1910 return (abi_ulong*)(vfpframe + 1);
1911 }
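/* Each coprocessor block in the v2 regspace is self-describing: a magic
 * number and a size word precede the payload, and the restore helpers return
 * a pointer just past the block they consumed (or 0 on a mismatch) so the
 * caller can walk to the next block.  The FPEXC handling above deliberately
 * forces the enable bit on and clears the exception/FPINST2-valid bits rather
 * than trusting the saved value.
 */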
1912
1913 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1914 abi_ulong *regspace)
1915 {
1916 int i;
1917 abi_ulong magic, sz;
1918 struct target_iwmmxt_sigframe *iwmmxtframe;
1919 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1920
1921 __get_user(magic, &iwmmxtframe->magic);
1922 __get_user(sz, &iwmmxtframe->size);
1923 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1924 return 0;
1925 }
1926 for (i = 0; i < 16; i++) {
1927 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1928 }
1929 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1930     __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);

1931 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1932 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1933 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1934 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1935 return (abi_ulong*)(iwmmxtframe + 1);
1936 }
1937
1938 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
1939 struct target_ucontext_v2 *uc)
1940 {
1941 sigset_t host_set;
1942 abi_ulong *regspace;
1943
1944 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1945 sigprocmask(SIG_SETMASK, &host_set, NULL);
1946
1947 if (restore_sigcontext(env, &uc->tuc_mcontext))
1948 return 1;
1949
1950 /* Restore coprocessor signal frame */
1951 regspace = uc->tuc_regspace;
1952 if (arm_feature(env, ARM_FEATURE_VFP)) {
1953 regspace = restore_sigframe_v2_vfp(env, regspace);
1954 if (!regspace) {
1955 return 1;
1956 }
1957 }
1958 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1959 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1960 if (!regspace) {
1961 return 1;
1962 }
1963 }
1964
1965 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
1966 return 1;
1967
1968 #if 0
1969 /* Send SIGTRAP if we're single-stepping */
1970 if (ptrace_cancel_bpt(current))
1971 send_sig(SIGTRAP, current, 1);
1972 #endif
1973
1974 return 0;
1975 }
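/* Common tail for both sigreturn flavours on v2 kernels: the plain and rt
 * frames both embed a full target_ucontext_v2, so the sigmask, mcontext,
 * coprocessor regspace and sigaltstack state can all be restored by one
 * helper.  A non-zero return makes the callers deliver SIGSEGV via their
 * badframe paths.
 */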
1976
1977 static long do_sigreturn_v2(CPUARMState *env)
1978 {
1979 abi_ulong frame_addr;
1980 struct sigframe_v2 *frame = NULL;
1981
1982 /*
1983 * Since we stacked the signal on a 64-bit boundary,
1984 * then 'sp' should be word aligned here. If it's
1985 * not, then the user is trying to mess with us.
1986 */
1987 frame_addr = env->regs[13];
1988 if (frame_addr & 7) {
1989 goto badframe;
1990 }
1991
1992 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1993 goto badframe;
1994
1995 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
1996 goto badframe;
1997
1998 unlock_user_struct(frame, frame_addr, 0);
1999 return env->regs[0];
2000
2001 badframe:
2002 unlock_user_struct(frame, frame_addr, 0);
2003 force_sig(TARGET_SIGSEGV /* , current */);
2004 return 0;
2005 }
2006
2007 long do_sigreturn(CPUARMState *env)
2008 {
2009 if (get_osversion() >= 0x020612) {
2010 return do_sigreturn_v2(env);
2011 } else {
2012 return do_sigreturn_v1(env);
2013 }
2014 }
2015
2016 static long do_rt_sigreturn_v1(CPUARMState *env)
2017 {
2018 abi_ulong frame_addr;
2019 struct rt_sigframe_v1 *frame = NULL;
2020 sigset_t host_set;
2021
2022 /*
2023 * Since we stacked the signal on a 64-bit boundary,
2024 * then 'sp' should be word aligned here. If it's
2025 * not, then the user is trying to mess with us.
2026 */
2027 frame_addr = env->regs[13];
2028 if (frame_addr & 7) {
2029 goto badframe;
2030 }
2031
2032 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2033 goto badframe;
2034
2035 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2036 sigprocmask(SIG_SETMASK, &host_set, NULL);
2037
2038 if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
2039 goto badframe;
2040
2041 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2042 goto badframe;
2043
2044 #if 0
2045 /* Send SIGTRAP if we're single-stepping */
2046 if (ptrace_cancel_bpt(current))
2047 send_sig(SIGTRAP, current, 1);
2048 #endif
2049 unlock_user_struct(frame, frame_addr, 0);
2050 return env->regs[0];
2051
2052 badframe:
2053 unlock_user_struct(frame, frame_addr, 0);
2054 force_sig(TARGET_SIGSEGV /* , current */);
2055 return 0;
2056 }
2057
2058 static long do_rt_sigreturn_v2(CPUARMState *env)
2059 {
2060 abi_ulong frame_addr;
2061 struct rt_sigframe_v2 *frame = NULL;
2062
2063 /*
2064 * Since we stacked the signal on a 64-bit boundary,
2065 * then 'sp' should be word aligned here. If it's
2066 * not, then the user is trying to mess with us.
2067 */
2068 frame_addr = env->regs[13];
2069 if (frame_addr & 7) {
2070 goto badframe;
2071 }
2072
2073 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2074 goto badframe;
2075
2076 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2077 goto badframe;
2078
2079 unlock_user_struct(frame, frame_addr, 0);
2080 return env->regs[0];
2081
2082 badframe:
2083 unlock_user_struct(frame, frame_addr, 0);
2084 force_sig(TARGET_SIGSEGV /* , current */);
2085 return 0;
2086 }
2087
2088 long do_rt_sigreturn(CPUARMState *env)
2089 {
2090 if (get_osversion() >= 0x020612) {
2091 return do_rt_sigreturn_v2(env);
2092 } else {
2093 return do_rt_sigreturn_v1(env);
2094 }
2095 }
2096
2097 #elif defined(TARGET_SPARC)
2098
2099 #define __SUNOS_MAXWIN 31
2100
2101 /* This is what SunOS does, so shall I. */
2102 struct target_sigcontext {
2103 abi_ulong sigc_onstack; /* state to restore */
2104
2105 abi_ulong sigc_mask; /* sigmask to restore */
2106 abi_ulong sigc_sp; /* stack pointer */
2107 abi_ulong sigc_pc; /* program counter */
2108 abi_ulong sigc_npc; /* next program counter */
2109 abi_ulong sigc_psr; /* for condition codes etc */
2110 abi_ulong sigc_g1; /* User uses these two registers */
2111 abi_ulong sigc_o0; /* within the trampoline code. */
2112
2113     /* Now comes information regarding the user's window set
2114 * at the time of the signal.
2115 */
2116 abi_ulong sigc_oswins; /* outstanding windows */
2117
2118 /* stack ptrs for each regwin buf */
2119 char *sigc_spbuf[__SUNOS_MAXWIN];
2120
2121 /* Windows to restore after signal */
2122 struct {
2123 abi_ulong locals[8];
2124 abi_ulong ins[8];
2125 } sigc_wbuf[__SUNOS_MAXWIN];
2126 };
2127 /* A Sparc stack frame */
2128 struct sparc_stackf {
2129 abi_ulong locals[8];
2130 abi_ulong ins[8];
2131 /* It's simpler to treat fp and callers_pc as elements of ins[]
2132 * since we never need to access them ourselves.
2133 */
2134 char *structptr;
2135 abi_ulong xargs[6];
2136 abi_ulong xxargs[1];
2137 };
2138
2139 typedef struct {
2140 struct {
2141 abi_ulong psr;
2142 abi_ulong pc;
2143 abi_ulong npc;
2144 abi_ulong y;
2145 abi_ulong u_regs[16]; /* globals and ins */
2146 } si_regs;
2147 int si_mask;
2148 } __siginfo_t;
2149
2150 typedef struct {
2151 abi_ulong si_float_regs[32];
2152 unsigned long si_fsr;
2153 unsigned long si_fpqdepth;
2154 struct {
2155 unsigned long *insn_addr;
2156 unsigned long insn;
2157 } si_fpqueue [16];
2158 } qemu_siginfo_fpu_t;
2159
2160
2161 struct target_signal_frame {
2162 struct sparc_stackf ss;
2163 __siginfo_t info;
2164 abi_ulong fpu_save;
2165 abi_ulong insns[2] __attribute__ ((aligned (8)));
2166 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2167 abi_ulong extra_size; /* Should be 0 */
2168 qemu_siginfo_fpu_t fpu_state;
2169 };
2170 struct target_rt_signal_frame {
2171 struct sparc_stackf ss;
2172 siginfo_t info;
2173 abi_ulong regs[20];
2174 sigset_t mask;
2175 abi_ulong fpu_save;
2176 unsigned int insns[2];
2177 stack_t stack;
2178 unsigned int extra_size; /* Should be 0 */
2179 qemu_siginfo_fpu_t fpu_state;
2180 };
2181
2182 #define UREG_O0 16
2183 #define UREG_O6 22
2184 #define UREG_I0 0
2185 #define UREG_I1 1
2186 #define UREG_I2 2
2187 #define UREG_I3 3
2188 #define UREG_I4 4
2189 #define UREG_I5 5
2190 #define UREG_I6 6
2191 #define UREG_I7 7
2192 #define UREG_L0 8
2193 #define UREG_FP UREG_I6
2194 #define UREG_SP UREG_O6
2195
2196 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2197 CPUSPARCState *env,
2198 unsigned long framesize)
2199 {
2200 abi_ulong sp;
2201
2202 sp = env->regwptr[UREG_FP];
2203
2204 /* This is the X/Open sanctioned signal stack switching. */
2205 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2206 if (!on_sig_stack(sp)
2207 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7))
2208 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2209 }
2210 return sp - framesize;
2211 }
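/* The frame is carved out below the caller's frame pointer (%i6, UREG_FP)
 * rather than %sp, which appears to mirror the kernel's sparc32
 * get_sigframe().  The alternate stack is only used when its top is 8-byte
 * aligned, and setup_frame() passes a size already rounded up to 8 bytes
 * (NF_ALIGNEDSZ below) so the subtraction preserves whatever alignment the
 * frame pointer already has.
 */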
2212
2213 static int
2214 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2215 {
2216 int err = 0, i;
2217
2218 err |= __put_user(env->psr, &si->si_regs.psr);
2219 err |= __put_user(env->pc, &si->si_regs.pc);
2220 err |= __put_user(env->npc, &si->si_regs.npc);
2221 err |= __put_user(env->y, &si->si_regs.y);
2222 for (i=0; i < 8; i++) {
2223 err |= __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2224 }
2225 for (i=0; i < 8; i++) {
2226 err |= __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2227 }
2228 err |= __put_user(mask, &si->si_mask);
2229 return err;
2230 }
2231
2232 #if 0
2233 static int
2234 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2235 CPUSPARCState *env, unsigned long mask)
2236 {
2237 int err = 0;
2238
2239 err |= __put_user(mask, &sc->sigc_mask);
2240 err |= __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2241 err |= __put_user(env->pc, &sc->sigc_pc);
2242 err |= __put_user(env->npc, &sc->sigc_npc);
2243 err |= __put_user(env->psr, &sc->sigc_psr);
2244 err |= __put_user(env->gregs[1], &sc->sigc_g1);
2245 err |= __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2246
2247 return err;
2248 }
2249 #endif
2250 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2251
2252 static void setup_frame(int sig, struct target_sigaction *ka,
2253 target_sigset_t *set, CPUSPARCState *env)
2254 {
2255 abi_ulong sf_addr;
2256 struct target_signal_frame *sf;
2257 int sigframe_size, err, i;
2258
2259 /* 1. Make sure everything is clean */
2260 //synchronize_user_stack();
2261
2262 sigframe_size = NF_ALIGNEDSZ;
2263 sf_addr = get_sigframe(ka, env, sigframe_size);
2264
2265 sf = lock_user(VERIFY_WRITE, sf_addr,
2266 sizeof(struct target_signal_frame), 0);
2267 if (!sf)
2268 goto sigsegv;
2269
2270 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2271 #if 0
2272 if (invalid_frame_pointer(sf, sigframe_size))
2273 goto sigill_and_return;
2274 #endif
2275 /* 2. Save the current process state */
2276 err = setup___siginfo(&sf->info, env, set->sig[0]);
2277 err |= __put_user(0, &sf->extra_size);
2278
2279 //err |= save_fpu_state(regs, &sf->fpu_state);
2280 //err |= __put_user(&sf->fpu_state, &sf->fpu_save);
2281
2282 err |= __put_user(set->sig[0], &sf->info.si_mask);
2283 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2284 err |= __put_user(set->sig[i + 1], &sf->extramask[i]);
2285 }
2286
2287 for (i = 0; i < 8; i++) {
2288 err |= __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2289 }
2290 for (i = 0; i < 8; i++) {
2291 err |= __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2292 }
2293 if (err)
2294 goto sigsegv;
2295
2296 /* 3. signal handler back-trampoline and parameters */
2297 env->regwptr[UREG_FP] = sf_addr;
2298 env->regwptr[UREG_I0] = sig;
2299 env->regwptr[UREG_I1] = sf_addr +
2300 offsetof(struct target_signal_frame, info);
2301 env->regwptr[UREG_I2] = sf_addr +
2302 offsetof(struct target_signal_frame, info);
2303
2304 /* 4. signal handler */
2305 env->pc = ka->_sa_handler;
2306 env->npc = (env->pc + 4);
2307 /* 5. return to kernel instructions */
2308 if (ka->sa_restorer)
2309 env->regwptr[UREG_I7] = ka->sa_restorer;
2310 else {
2311 uint32_t val32;
2312
2313 env->regwptr[UREG_I7] = sf_addr +
2314 offsetof(struct target_signal_frame, insns) - 2 * 4;
2315
2316 /* mov __NR_sigreturn, %g1 */
2317 val32 = 0x821020d8;
2318 err |= __put_user(val32, &sf->insns[0]);
2319
2320 /* t 0x10 */
2321 val32 = 0x91d02010;
2322 err |= __put_user(val32, &sf->insns[1]);
2323 if (err)
2324 goto sigsegv;
2325
2326 /* Flush instruction space. */
2327 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2328 // tb_flush(env);
2329 }
2330 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2331 return;
2332 #if 0
2333 sigill_and_return:
2334 force_sig(TARGET_SIGILL);
2335 #endif
2336 sigsegv:
2337 //fprintf(stderr, "force_sig\n");
2338 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2339 force_sig(TARGET_SIGSEGV);
2340 }
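/* Trampoline notes: 0x821020d8 encodes "mov 0xd8, %g1" (0xd8 == 216, which
 * should be the sparc __NR_sigreturn) and 0x91d02010 is "ta 0x10", the Linux
 * syscall trap.  %i7 is set to two instructions before insns[0] because the
 * handler's return sequence jumps to %i7 + 8, so control lands exactly on the
 * trampoline.
 */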
2341 static inline int
2342 restore_fpu_state(CPUSPARCState *env, qemu_siginfo_fpu_t *fpu)
2343 {
2344 int err;
2345 #if 0
2346 #ifdef CONFIG_SMP
2347 if (current->flags & PF_USEDFPU)
2348 regs->psr &= ~PSR_EF;
2349 #else
2350 if (current == last_task_used_math) {
2351 last_task_used_math = 0;
2352 regs->psr &= ~PSR_EF;
2353 }
2354 #endif
2355 current->used_math = 1;
2356 current->flags &= ~PF_USEDFPU;
2357 #endif
2358 #if 0
2359 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu)))
2360 return -EFAULT;
2361 #endif
2362
2363 /* XXX: incorrect */
2364 err = copy_from_user(&env->fpr[0], fpu->si_float_regs[0],
2365 (sizeof(abi_ulong) * 32));
2366 err |= __get_user(env->fsr, &fpu->si_fsr);
2367 #if 0
2368 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
2369 if (current->thread.fpqdepth != 0)
2370 err |= __copy_from_user(&current->thread.fpqueue[0],
2371 &fpu->si_fpqueue[0],
2372 ((sizeof(unsigned long) +
2373 (sizeof(unsigned long *)))*16));
2374 #endif
2375 return err;
2376 }
2377
2378
2379 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2380 target_siginfo_t *info,
2381 target_sigset_t *set, CPUSPARCState *env)
2382 {
2383 fprintf(stderr, "setup_rt_frame: not implemented\n");
2384 }
2385
2386 long do_sigreturn(CPUSPARCState *env)
2387 {
2388 abi_ulong sf_addr;
2389 struct target_signal_frame *sf;
2390 uint32_t up_psr, pc, npc;
2391 target_sigset_t set;
2392 sigset_t host_set;
2393 int err, i;
2394
2395 sf_addr = env->regwptr[UREG_FP];
2396 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1))
2397 goto segv_and_exit;
2398 #if 0
2399 fprintf(stderr, "sigreturn\n");
2400 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2401 #endif
2402 //cpu_dump_state(env, stderr, fprintf, 0);
2403
2404 /* 1. Make sure we are not getting garbage from the user */
2405
2406 if (sf_addr & 3)
2407 goto segv_and_exit;
2408
2409 err = __get_user(pc, &sf->info.si_regs.pc);
2410 err |= __get_user(npc, &sf->info.si_regs.npc);
2411
2412 if ((pc | npc) & 3)
2413 goto segv_and_exit;
2414
2415 /* 2. Restore the state */
2416 err |= __get_user(up_psr, &sf->info.si_regs.psr);
2417
2418 /* User can only change condition codes and FPU enabling in %psr. */
2419 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2420 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2421
2422 env->pc = pc;
2423 env->npc = npc;
2424 err |= __get_user(env->y, &sf->info.si_regs.y);
2425 for (i=0; i < 8; i++) {
2426 err |= __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2427 }
2428 for (i=0; i < 8; i++) {
2429 err |= __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2430 }
2431
2432 /* FIXME: implement FPU save/restore:
2433 * __get_user(fpu_save, &sf->fpu_save);
2434 * if (fpu_save)
2435 * err |= restore_fpu_state(env, fpu_save);
2436 */
2437
2438     /* This is pretty much atomic; no amount of locking would prevent
2439      * the races which exist anyway.
2440 */
2441 err |= __get_user(set.sig[0], &sf->info.si_mask);
2442 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2443 err |= (__get_user(set.sig[i], &sf->extramask[i - 1]));
2444 }
2445
2446 target_to_host_sigset_internal(&host_set, &set);
2447 sigprocmask(SIG_SETMASK, &host_set, NULL);
2448
2449 if (err)
2450 goto segv_and_exit;
2451 unlock_user_struct(sf, sf_addr, 0);
2452 return env->regwptr[0];
2453
2454 segv_and_exit:
2455 unlock_user_struct(sf, sf_addr, 0);
2456 force_sig(TARGET_SIGSEGV);
2457 }
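/* As on ARM, the guest only gets a restricted restore here: the %psr update
 * is limited to the condition codes (PSR_ICC), pc/npc must be 4-byte aligned,
 * and the globals and window registers are reloaded from the saved
 * __siginfo_t.  FPU restore is still a FIXME (see above).
 */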
2458
2459 long do_rt_sigreturn(CPUSPARCState *env)
2460 {
2461 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2462 return -TARGET_ENOSYS;
2463 }
2464
2465 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2466 #define MC_TSTATE 0
2467 #define MC_PC 1
2468 #define MC_NPC 2
2469 #define MC_Y 3
2470 #define MC_G1 4
2471 #define MC_G2 5
2472 #define MC_G3 6
2473 #define MC_G4 7
2474 #define MC_G5 8
2475 #define MC_G6 9
2476 #define MC_G7 10
2477 #define MC_O0 11
2478 #define MC_O1 12
2479 #define MC_O2 13
2480 #define MC_O3 14
2481 #define MC_O4 15
2482 #define MC_O5 16
2483 #define MC_O6 17
2484 #define MC_O7 18
2485 #define MC_NGREG 19
2486
2487 typedef abi_ulong target_mc_greg_t;
2488 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2489
2490 struct target_mc_fq {
2491 abi_ulong *mcfq_addr;
2492 uint32_t mcfq_insn;
2493 };
2494
2495 struct target_mc_fpu {
2496 union {
2497 uint32_t sregs[32];
2498 uint64_t dregs[32];
2499 //uint128_t qregs[16];
2500 } mcfpu_fregs;
2501 abi_ulong mcfpu_fsr;
2502 abi_ulong mcfpu_fprs;
2503 abi_ulong mcfpu_gsr;
2504 struct target_mc_fq *mcfpu_fq;
2505 unsigned char mcfpu_qcnt;
2506 unsigned char mcfpu_qentsz;
2507 unsigned char mcfpu_enab;
2508 };
2509 typedef struct target_mc_fpu target_mc_fpu_t;
2510
2511 typedef struct {
2512 target_mc_gregset_t mc_gregs;
2513 target_mc_greg_t mc_fp;
2514 target_mc_greg_t mc_i7;
2515 target_mc_fpu_t mc_fpregs;
2516 } target_mcontext_t;
2517
2518 struct target_ucontext {
2519 struct target_ucontext *tuc_link;
2520 abi_ulong tuc_flags;
2521 target_sigset_t tuc_sigmask;
2522 target_mcontext_t tuc_mcontext;
2523 };
2524
2525 /* A V9 register window */
2526 struct target_reg_window {
2527 abi_ulong locals[8];
2528 abi_ulong ins[8];
2529 };
2530
2531 #define TARGET_STACK_BIAS 2047
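/* On sparc64 the ABI biases %sp and %fp by 2047 bytes, so the register window
 * save area of a frame lives at %fp + TARGET_STACK_BIAS; the w_addr
 * computations in the two functions below rely on this when they spill or
 * reload ins[6] (%fp) and ins[7] (the return address).
 */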
2532
2533 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2534 void sparc64_set_context(CPUSPARCState *env)
2535 {
2536 abi_ulong ucp_addr;
2537 struct target_ucontext *ucp;
2538 target_mc_gregset_t *grp;
2539 abi_ulong pc, npc, tstate;
2540 abi_ulong fp, i7, w_addr;
2541 int err;
2542 unsigned int i;
2543
2544 ucp_addr = env->regwptr[UREG_I0];
2545 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1))
2546 goto do_sigsegv;
2547 grp = &ucp->tuc_mcontext.mc_gregs;
2548 err = __get_user(pc, &((*grp)[MC_PC]));
2549 err |= __get_user(npc, &((*grp)[MC_NPC]));
2550 if (err || ((pc | npc) & 3))
2551 goto do_sigsegv;
2552 if (env->regwptr[UREG_I1]) {
2553 target_sigset_t target_set;
2554 sigset_t set;
2555
2556 if (TARGET_NSIG_WORDS == 1) {
2557 if (__get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]))
2558 goto do_sigsegv;
2559 } else {
2560 abi_ulong *src, *dst;
2561 src = ucp->tuc_sigmask.sig;
2562 dst = target_set.sig;
2563 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2564 err |= __get_user(*dst, src);
2565 }
2566 if (err)
2567 goto do_sigsegv;
2568 }
2569 target_to_host_sigset_internal(&set, &target_set);
2570 sigprocmask(SIG_SETMASK, &set, NULL);
2571 }
2572 env->pc = pc;
2573 env->npc = npc;
2574 err |= __get_user(env->y, &((*grp)[MC_Y]));
2575 err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
2576 env->asi = (tstate >> 24) & 0xff;
2577 cpu_put_ccr(env, tstate >> 32);
2578 cpu_put_cwp64(env, tstate & 0x1f);
2579 err |= __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2580 err |= __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2581 err |= __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2582 err |= __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2583 err |= __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2584 err |= __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2585 err |= __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2586 err |= __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2587 err |= __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2588 err |= __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2589 err |= __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2590 err |= __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2591 err |= __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2592 err |= __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2593 err |= __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2594
2595 err |= __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2596 err |= __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2597
2598 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2599 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2600 abi_ulong) != 0)
2601 goto do_sigsegv;
2602 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2603 abi_ulong) != 0)
2604 goto do_sigsegv;
2605 /* FIXME this does not match how the kernel handles the FPU in
2606 * its sparc64_set_context implementation. In particular the FPU
2607 * is only restored if fenab is non-zero in:
2608 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2609 */
2610 err |= __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2611 {
2612 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2613 for (i = 0; i < 64; i++, src++) {
2614 if (i & 1) {
2615 err |= __get_user(env->fpr[i/2].l.lower, src);
2616 } else {
2617 err |= __get_user(env->fpr[i/2].l.upper, src);
2618 }
2619 }
2620 }
2621 err |= __get_user(env->fsr,
2622 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2623 err |= __get_user(env->gsr,
2624 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2625 if (err)
2626 goto do_sigsegv;
2627 unlock_user_struct(ucp, ucp_addr, 0);
2628 return;
2629 do_sigsegv:
2630 unlock_user_struct(ucp, ucp_addr, 0);
2631 force_sig(TARGET_SIGSEGV);
2632 }
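/* The saved tstate word packs several fields: CCR above bit 32, ASI in bits
 * 31:24 and CWP in the low five bits, which is why it is split with shifts
 * and masks above.  The FP copy walks 64 32-bit words covering fpr[0..31];
 * even indices land in the upper half and odd indices in the lower half of
 * each 64-bit register.
 */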
2633
2634 void sparc64_get_context(CPUSPARCState *env)
2635 {
2636 abi_ulong ucp_addr;
2637 struct target_ucontext *ucp;
2638 target_mc_gregset_t *grp;
2639 target_mcontext_t *mcp;
2640 abi_ulong fp, i7, w_addr;
2641 int err;
2642 unsigned int i;
2643 target_sigset_t target_set;
2644 sigset_t set;
2645
2646 ucp_addr = env->regwptr[UREG_I0];
2647 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
2648 goto do_sigsegv;
2649
2650 mcp = &ucp->tuc_mcontext;
2651 grp = &mcp->mc_gregs;
2652
2653 /* Skip over the trap instruction, first. */
2654 env->pc = env->npc;
2655 env->npc += 4;
2656
2657 err = 0;
2658
2659 sigprocmask(0, NULL, &set);
2660 host_to_target_sigset_internal(&target_set, &set);
2661 if (TARGET_NSIG_WORDS == 1) {
2662 err |= __put_user(target_set.sig[0],
2663 (abi_ulong *)&ucp->tuc_sigmask);
2664 } else {
2665 abi_ulong *src, *dst;
2666 src = target_set.sig;
2667 dst = ucp->tuc_sigmask.sig;
2668 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2669 err |= __put_user(*src, dst);
2670 }
2671 if (err)
2672 goto do_sigsegv;
2673 }
2674
2675 /* XXX: tstate must be saved properly */
2676 // err |= __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2677 err |= __put_user(env->pc, &((*grp)[MC_PC]));
2678 err |= __put_user(env->npc, &((*grp)[MC_NPC]));
2679 err |= __put_user(env->y, &((*grp)[MC_Y]));
2680 err |= __put_user(env->gregs[1], &((*grp)[MC_G1]));
2681 err |= __put_user(env->gregs[2], &((*grp)[MC_G2]));
2682 err |= __put_user(env->gregs[3], &((*grp)[MC_G3]));
2683 err |= __put_user(env->gregs[4], &((*grp)[MC_G4]));
2684 err |= __put_user(env->gregs[5], &((*grp)[MC_G5]));
2685 err |= __put_user(env->gregs[6], &((*grp)[MC_G6]));
2686 err |= __put_user(env->gregs[7], &((*grp)[MC_G7]));
2687 err |= __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2688 err |= __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2689 err |= __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2690 err |= __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2691 err |= __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2692 err |= __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2693 err |= __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2694 err |= __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2695
2696 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2697 fp = i7 = 0;
2698 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2699 abi_ulong) != 0)
2700 goto do_sigsegv;
2701 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2702 abi_ulong) != 0)
2703 goto do_sigsegv;
2704 err |= __put_user(fp, &(mcp->mc_fp));
2705 err |= __put_user(i7, &(mcp->mc_i7));
2706
2707 {
2708 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2709 for (i = 0; i < 64; i++, dst++) {
2710 if (i & 1) {
2711 err |= __put_user(env->fpr[i/2].l.lower, dst);
2712 } else {
2713 err |= __put_user(env->fpr[i/2].l.upper, dst);
2714 }
2715 }
2716 }
2717 err |= __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2718 err |= __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2719 err |= __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2720
2721 if (err)
2722 goto do_sigsegv;
2723 unlock_user_struct(ucp, ucp_addr, 1);
2724 return;
2725 do_sigsegv:
2726 unlock_user_struct(ucp, ucp_addr, 1);
2727 force_sig(TARGET_SIGSEGV);
2728 }
2729 #endif
2730 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2731
2732 # if defined(TARGET_ABI_MIPSO32)
2733 struct target_sigcontext {
2734 uint32_t sc_regmask; /* Unused */
2735 uint32_t sc_status;
2736 uint64_t sc_pc;
2737 uint64_t sc_regs[32];
2738 uint64_t sc_fpregs[32];
2739 uint32_t sc_ownedfp; /* Unused */
2740 uint32_t sc_fpc_csr;
2741 uint32_t sc_fpc_eir; /* Unused */
2742 uint32_t sc_used_math;
2743 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2744 uint32_t pad0;
2745 uint64_t sc_mdhi;
2746 uint64_t sc_mdlo;
2747 target_ulong sc_hi1; /* Was sc_cause */
2748 target_ulong sc_lo1; /* Was sc_badvaddr */
2749 target_ulong sc_hi2; /* Was sc_sigset[4] */
2750 target_ulong sc_lo2;
2751 target_ulong sc_hi3;
2752 target_ulong sc_lo3;
2753 };
2754 # else /* N32 || N64 */
2755 struct target_sigcontext {
2756 uint64_t sc_regs[32];
2757 uint64_t sc_fpregs[32];
2758 uint64_t sc_mdhi;
2759 uint64_t sc_hi1;
2760 uint64_t sc_hi2;
2761 uint64_t sc_hi3;
2762 uint64_t sc_mdlo;
2763 uint64_t sc_lo1;
2764 uint64_t sc_lo2;
2765 uint64_t sc_lo3;
2766 uint64_t sc_pc;
2767 uint32_t sc_fpc_csr;
2768 uint32_t sc_used_math;
2769 uint32_t sc_dsp;
2770 uint32_t sc_reserved;
2771 };
2772 # endif /* O32 */
2773
2774 struct sigframe {
2775 uint32_t sf_ass[4]; /* argument save space for o32 */
2776 uint32_t sf_code[2]; /* signal trampoline */
2777 struct target_sigcontext sf_sc;
2778 target_sigset_t sf_mask;
2779 };
2780
2781 struct target_ucontext {
2782 target_ulong tuc_flags;
2783 target_ulong tuc_link;
2784 target_stack_t tuc_stack;
2785 target_ulong pad0;
2786 struct target_sigcontext tuc_mcontext;
2787 target_sigset_t tuc_sigmask;
2788 };
2789
2790 struct target_rt_sigframe {
2791 uint32_t rs_ass[4]; /* argument save space for o32 */
2792 uint32_t rs_code[2]; /* signal trampoline */
2793 struct target_siginfo rs_info;
2794 struct target_ucontext rs_uc;
2795 };
2796
2797 /* Install trampoline to jump back from signal handler */
2798 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2799 {
2800 int err = 0;
2801
2802 /*
2803 * Set up the return code ...
2804 *
2805 * li v0, __NR__foo_sigreturn
2806 * syscall
2807 */
2808
2809 err |= __put_user(0x24020000 + syscall, tramp + 0);
2810 err |= __put_user(0x0000000c , tramp + 1);
2811 return err;
2812 }
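/* 0x24020000 | nr assembles to "addiu $v0, $zero, nr" (the "li v0, nr" in the
 * comment) and 0x0000000c is the bare "syscall" opcode, so the trampoline
 * simply reloads v0 with the sigreturn number and traps back into the
 * emulated kernel.
 */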
2813
2814 static inline int
2815 setup_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2816 {
2817 int err = 0;
2818 int i;
2819
2820 err |= __put_user(exception_resume_pc(regs), &sc->sc_pc);
2821 regs->hflags &= ~MIPS_HFLAG_BMASK;
2822
2823 __put_user(0, &sc->sc_regs[0]);
2824 for (i = 1; i < 32; ++i) {
2825 err |= __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2826 }
2827
2828 err |= __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2829 err |= __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2830
2831 /* Rather than checking for dsp existence, always copy. The storage
2832 would just be garbage otherwise. */
2833 err |= __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2834 err |= __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2835 err |= __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2836 err |= __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2837 err |= __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2838 err |= __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2839 {
2840 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2841 err |= __put_user(dsp, &sc->sc_dsp);
2842 }
2843
2844 err |= __put_user(1, &sc->sc_used_math);
2845
2846 for (i = 0; i < 32; ++i) {
2847 err |= __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2848 }
2849
2850 return err;
2851 }
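/* sc_regs[0] is stored as zero and the restore loop below starts at 1, since
 * $zero is hard-wired.  MIPS_HFLAG_BMASK is cleared here, presumably because
 * exception_resume_pc() has already folded any branch-delay-slot state into
 * the saved sc_pc.
 */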
2852
2853 static inline int
2854 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2855 {
2856 int err = 0;
2857 int i;
2858
2859 err |= __get_user(regs->CP0_EPC, &sc->sc_pc);
2860
2861 err |= __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2862 err |= __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2863
2864 for (i = 1; i < 32; ++i) {
2865 err |= __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2866 }
2867
2868 err |= __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2869 err |= __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2870 err |= __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2871 err |= __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2872 err |= __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2873 err |= __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2874 {
2875 uint32_t dsp;
2876 err |= __get_user(dsp, &sc->sc_dsp);
2877 cpu_wrdsp(dsp, 0x3ff, regs);
2878 }
2879
2880 for (i = 0; i < 32; ++i) {
2881 err |= __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2882 }
2883
2884 return err;
2885 }
2886
2887 /*
2888  * Determine which stack to use.
2889 */
2890 static inline abi_ulong
2891 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2892 {
2893 unsigned long sp;
2894
2895 /* Default to using normal stack */
2896 sp = regs->active_tc.gpr[29];
2897
2898 /*
2899 * FPU emulator may have its own trampoline active just
2900      * above the user stack, 16 bytes before the next lowest
2901      * 16-byte boundary. Try to avoid trashing it.
2902 */
2903 sp -= 32;
2904
2905 /* This is the X/Open sanctioned signal stack switching. */
2906 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2907 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2908 }
2909
2910 return (sp - frame_size) & ~7;
2911 }
2912
2913 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2914 {
2915 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2916 env->hflags &= ~MIPS_HFLAG_M16;
2917 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2918 env->active_tc.PC &= ~(target_ulong) 1;
2919 }
2920 }
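/* On MIPS16/microMIPS-capable CPUs bit 0 of a code address selects the
 * compressed ISA, so after pointing PC at a handler or return address this
 * helper copies that bit into MIPS_HFLAG_M16 and strips it from the
 * architectural PC, mirroring what a jump to that address would do in
 * hardware.
 */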
2921
2922 # if defined(TARGET_ABI_MIPSO32)
2923 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2924 static void setup_frame(int sig, struct target_sigaction * ka,
2925 target_sigset_t *set, CPUMIPSState *regs)
2926 {
2927 struct sigframe *frame;
2928 abi_ulong frame_addr;
2929 int i;
2930
2931 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2932 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
2933 goto give_sigsegv;
2934
2935 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2936
2937 if(setup_sigcontext(regs, &frame->sf_sc))
2938 goto give_sigsegv;
2939
2940 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2941 if(__put_user(set->sig[i], &frame->sf_mask.sig[i]))
2942 goto give_sigsegv;
2943 }
2944
2945 /*
2946 * Arguments to signal handler:
2947 *
2948 * a0 = signal number
2949 * a1 = 0 (should be cause)
2950 * a2 = pointer to struct sigcontext
2951 *
2952 * $25 and PC point to the signal handler, $29 points to the
2953 * struct sigframe.
2954 */
2955 regs->active_tc.gpr[ 4] = sig;
2956 regs->active_tc.gpr[ 5] = 0;
2957 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2958 regs->active_tc.gpr[29] = frame_addr;
2959 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2960     /* The original kernel code sets CP0_EPC to the handler,
2961      * since it returns to userland using eret;
2962      * we cannot do that here, so we must set PC directly */
2963 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2964 mips_set_hflags_isa_mode_from_pc(regs);
2965 unlock_user_struct(frame, frame_addr, 1);
2966 return;
2967
2968 give_sigsegv:
2969 unlock_user_struct(frame, frame_addr, 1);
2970 force_sig(TARGET_SIGSEGV/*, current*/);
2971 }
2972
2973 long do_sigreturn(CPUMIPSState *regs)
2974 {
2975 struct sigframe *frame;
2976 abi_ulong frame_addr;
2977 sigset_t blocked;
2978 target_sigset_t target_set;
2979 int i;
2980
2981 #if defined(DEBUG_SIGNAL)
2982 fprintf(stderr, "do_sigreturn\n");
2983 #endif
2984 frame_addr = regs->active_tc.gpr[29];
2985 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2986 goto badframe;
2987
2988 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2989 if(__get_user(target_set.sig[i], &frame->sf_mask.sig[i]))
2990 goto badframe;
2991 }
2992
2993 target_to_host_sigset_internal(&blocked, &target_set);
2994 sigprocmask(SIG_SETMASK, &blocked, NULL);
2995
2996 if (restore_sigcontext(regs, &frame->sf_sc))
2997 goto badframe;
2998
2999 #if 0
3000 /*
3001 * Don't let your children do this ...
3002 */
3003 __asm__ __volatile__(
3004 "move\t$29, %0\n\t"
3005 "j\tsyscall_exit"
3006 :/* no outputs */
3007 :"r" (&regs));
3008 /* Unreached */
3009 #endif
3010
3011 regs->active_tc.PC = regs->CP0_EPC;
3012 mips_set_hflags_isa_mode_from_pc(regs);
3013     /* I am not sure this is right, but it seems to work;
3014      * maybe a problem with nested signals? */
3015 regs->CP0_EPC = 0;
3016 return -TARGET_QEMU_ESIGRETURN;
3017
3018 badframe:
3019 force_sig(TARGET_SIGSEGV/*, current*/);
3020 return 0;
3021 }
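/* Returning -TARGET_QEMU_ESIGRETURN is a QEMU-internal convention: the MIPS
 * cpu_loop treats it as "registers already restored" and avoids writing a
 * syscall result over the register state we just reloaded.
 */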
3022 # endif /* O32 */
3023
3024 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3025 target_siginfo_t *info,
3026 target_sigset_t *set, CPUMIPSState *env)
3027 {
3028 struct target_rt_sigframe *frame;
3029 abi_ulong frame_addr;
3030 int i;
3031
3032 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3033 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3034 goto give_sigsegv;
3035
3036 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3037
3038 copy_siginfo_to_user(&frame->rs_info, info);
3039
3040 __put_user(0, &frame->rs_uc.tuc_flags);
3041 __put_user(0, &frame->rs_uc.tuc_link);
3042 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3043 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3044 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3045 &frame->rs_uc.tuc_stack.ss_flags);
3046
3047 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3048
3049 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3050 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3051 }
3052
3053 /*
3054 * Arguments to signal handler:
3055 *
3056 * a0 = signal number
3057 * a1 = pointer to siginfo_t
3058 * a2 = pointer to struct ucontext
3059 *
3060 * $25 and PC point to the signal handler, $29 points to the
3061 * struct sigframe.
3062 */
3063 env->active_tc.gpr[ 4] = sig;
3064 env->active_tc.gpr[ 5] = frame_addr
3065 + offsetof(struct target_rt_sigframe, rs_info);
3066 env->active_tc.gpr[ 6] = frame_addr
3067 + offsetof(struct target_rt_sigframe, rs_uc);
3068 env->active_tc.gpr[29] = frame_addr;
3069 env->active_tc.gpr[31] = frame_addr
3070 + offsetof(struct target_rt_sigframe, rs_code);
3071     /* The original kernel code sets CP0_EPC to the handler,
3072      * since it returns to userland using eret;
3073      * we cannot do that here, so we must set PC directly */
3074 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3075 mips_set_hflags_isa_mode_from_pc(env);
3076 unlock_user_struct(frame, frame_addr, 1);
3077 return;
3078
3079 give_sigsegv:
3080 unlock_user_struct(frame, frame_addr, 1);
3081 force_sig(TARGET_SIGSEGV/*, current*/);
3082 }
3083
3084 long do_rt_sigreturn(CPUMIPSState *env)
3085 {
3086 struct target_rt_sigframe *frame;
3087 abi_ulong frame_addr;
3088 sigset_t blocked;
3089
3090 #if defined(DEBUG_SIGNAL)
3091 fprintf(stderr, "do_rt_sigreturn\n");
3092 #endif
3093 frame_addr = env->active_tc.gpr[29];
3094 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3095 goto badframe;
3096
3097 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3098 sigprocmask(SIG_SETMASK, &blocked, NULL);
3099
3100 if (restore_sigcontext(env, &frame->rs_uc.tuc_mcontext))
3101 goto badframe;
3102
3103 if (do_sigaltstack(frame_addr +
3104 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3105 0, get_sp_from_cpustate(env)) == -EFAULT)
3106 goto badframe;
3107
3108 env->active_tc.PC = env->CP0_EPC;
3109 mips_set_hflags_isa_mode_from_pc(env);
3110     /* I am not sure this is right, but it seems to work;
3111      * maybe a problem with nested signals? */
3112 env->CP0_EPC = 0;
3113 return -TARGET_QEMU_ESIGRETURN;
3114
3115 badframe:
3116 force_sig(TARGET_SIGSEGV/*, current*/);
3117 return 0;
3118 }
3119
3120 #elif defined(TARGET_SH4)
3121
3122 /*
3123 * code and data structures from linux kernel:
3124 * include/asm-sh/sigcontext.h
3125 * arch/sh/kernel/signal.c
3126 */
3127
3128 struct target_sigcontext {
3129 target_ulong oldmask;
3130
3131 /* CPU registers */
3132 target_ulong sc_gregs[16];
3133 target_ulong sc_pc;
3134 target_ulong sc_pr;
3135 target_ulong sc_sr;
3136 target_ulong sc_gbr;
3137 target_ulong sc_mach;
3138 target_ulong sc_macl;
3139
3140 /* FPU registers */
3141 target_ulong sc_fpregs[16];
3142 target_ulong sc_xfpregs[16];
3143 unsigned int sc_fpscr;
3144 unsigned int sc_fpul;
3145 unsigned int sc_ownedfp;
3146 };
3147
3148 struct target_sigframe
3149 {
3150 struct target_sigcontext sc;
3151 target_ulong extramask[TARGET_NSIG_WORDS-1];
3152 uint16_t retcode[3];
3153 };
3154
3155
3156 struct target_ucontext {
3157 target_ulong tuc_flags;
3158 struct target_ucontext *tuc_link;
3159 target_stack_t tuc_stack;
3160 struct target_sigcontext tuc_mcontext;
3161 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3162 };
3163
3164 struct target_rt_sigframe
3165 {
3166 struct target_siginfo info;
3167 struct target_ucontext uc;
3168 uint16_t retcode[3];
3169 };
3170
3171
3172 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3173 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
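/* MOVW(n) is the "mov.w @(disp,PC), r3" encoding (opcode 0x93xx) and
 * TRAP_NOARG is "trapa #0x10", the Linux syscall trap used when the syscall
 * number is already in r3.  Together they presumably form the retcode[]
 * trampolines of the frames above when no sa_restorer is supplied: load the
 * sigreturn number into r3, then trap.
 */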
3174
3175 static abi_ulong get_sigframe(struct target_sigaction *ka,
3176 unsigned long sp, size_t frame_size)
3177 {
3178 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3179 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3180 }
3181
3182 return (sp - frame_size) & -8ul;
3183 }
3184
3185 static int setup_sigcontext(struct target_sigcontext *sc,
3186 CPUSH4State *regs, unsigned long mask)
3187 {
3188 int err = 0;
3189 int i;
3190
3191 #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
3192 COPY(gregs[0]); COPY(gregs[1]);
3193 COPY(gregs[2]); COPY(gregs[3]);
3194 COPY(gregs[4]); COPY(gregs[5]);
3195 COPY(gregs[6]); COPY(gregs[7]);
3196 COPY(gregs[8]); COPY(gregs[9]);
3197 COPY(gregs[10]); COPY(gregs[11]);
3198 COPY(gregs[12]); COPY(gregs[13]);
3199 COPY(gregs[14]); COPY(gregs[15]);
3200 COPY(gbr); COPY(mach);
3201 COPY(macl); COPY(pr);
3202 COPY(sr); COPY(pc);
3203 #undef COPY
3204
3205 for (i=0; i<16; i++) {
3206 err |= __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3207 }
3208 err |= __put_user(regs->fpscr, &sc->sc_fpscr);
3209 err |= __put_user(regs->fpul, &sc->sc_fpul);
3210
3211 /* non-iBCS2 extensions.. */
3212 err |= __put_user(mask, &sc->oldmask);
3213
3214 return err;
3215 }
3216
3217 static int restore_sigcontext(CPUSH4State